diff --git a/.ci-operator.yaml b/.ci-operator.yaml index 1e59c02c25..64887a08b4 100644 --- a/.ci-operator.yaml +++ b/.ci-operator.yaml @@ -1,4 +1,4 @@ build_root_image: name: release namespace: openshift - tag: rhel-9-release-golang-1.22-openshift-4.17 + tag: rhel-9-release-golang-1.22-openshift-4.18 diff --git a/.github/ISSUE_TEMPLATE/bug-report.yaml b/.github/ISSUE_TEMPLATE/bug-report.yaml index 310fbfd389..44f42ebfe2 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yaml +++ b/.github/ISSUE_TEMPLATE/bug-report.yaml @@ -8,7 +8,7 @@ body: label: What happened? description: | Please provide as much info as possible. Not doing so may result in your bug not being addressed in a timely manner. - If this matter is security related, please disclose it privately via https://github.com/ovn-org/ovn-kubernetes/blob/master/SECURITY.md + If this matter is security related, please disclose it privately via https://github.com/ovn-kubernetes/ovn-kubernetes/blob/master/SECURITY.md validations: required: true diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 049a138367..a2541e4b6f 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,6 +1,6 @@ -#### What this PR does and why is it needed - +## 📑 Description + -#### Which issue(s) this PR fixes Fixes # -#### Special notes for reviewers +## Additional Information for reviewers -#### How to verify it +## ✅ Checks + +- [ ] My code requires changes to the documentation +- [ ] if so, I have updated the documentation as required +- [ ] My code requires tests +- [ ] if so, I have added and/or updated the tests as required +- [ ] All the tests have passed in the CI + +## How to verify it - -#### Details to documentation updates - - - -#### Description for the changelog - - -#### Does this PR introduce a user-facing change? 
- -```release-note - -``` diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 506210ce70..deb187706c 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -11,7 +11,7 @@ permissions: env: GO_VERSION: 1.22.0 REGISTRY: ghcr.io - OWNER: ovn-org + OWNER: ovn-kubernetes REPOSITORY: ovn-kubernetes FEDORA_IMAGE_NAME: ovn-kube-fedora UBUNTU_IMAGE_NAME: ovn-kube-ubuntu diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 47a2a7fa84..a50e1c9fb7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -17,7 +17,7 @@ concurrency: env: GO_VERSION: 1.22.0 - K8S_VERSION: v1.30.2 + K8S_VERSION: v1.31.0 KIND_CLUSTER_NAME: ovn KIND_INSTALL_INGRESS: true KIND_ALLOW_SYSTEM_WRITES: true @@ -83,8 +83,8 @@ jobs: exit 0 fi - if docker pull ghcr.io/ovn-org/ovn-kubernetes/ovn-kube-fedora:master; then - docker tag ghcr.io/ovn-org/ovn-kubernetes/ovn-kube-fedora:master ovn-daemonset-fedora:dev + if docker pull ghcr.io/ovn-kubernetes/ovn-kubernetes/ovn-kube-fedora:master; then + docker tag ghcr.io/ovn-kubernetes/ovn-kubernetes/ovn-kube-fedora:master ovn-daemonset-fedora:dev echo "MASTER_IMAGE_RESTORED=true" >> "$GITHUB_OUTPUT" exit 0 @@ -226,8 +226,8 @@ jobs: go get github.com/modocache/gover PATH=$PATH:$(go env GOPATH)/bin - mkdir -p $(go env GOPATH)/src/github.com/ovn-org - ln -sf $(pwd) $(go env GOPATH)/src/github.com/ovn-org/ovn-kubernetes + mkdir -p $(go env GOPATH)/src/github.com/ovn-kubernetes + ln -sf $(pwd) $(go env GOPATH)/src/github.com/ovn-kubernetes/ovn-kubernetes gover goveralls -coverprofile=gover.coverprofile -service=github @@ -433,15 +433,16 @@ jobs: - {"target": "kv-live-migration", "ha": "noHA", "gateway-mode": "local", "ipfamily": "ipv4", "disable-snat-multiple-gws": "SnatGW", "second-bridge": "1br", "ic": "ic-disabled", "num-workers": "3"} - {"target": "kv-live-migration", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "dualstack", "disable-snat-multiple-gws": "SnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "num-workers": "3"} - {"target": "control-plane", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "ipv4", "disable-snat-multiple-gws": "SnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "forwarding": "disable-forwarding"} - - {"target": "network-segmentation", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "dualstack", "disable-snat-multiple-gws": "SnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones"} - - {"target": "network-segmentation", "ha": "noHA", "gateway-mode": "local", "ipfamily": "dualstack", "disable-snat-multiple-gws": "SnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones"} + - {"target": "network-segmentation", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "dualstack", "disable-snat-multiple-gws": "noSnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "forwarding": "disable-forwarding"} + - {"target": "network-segmentation", "ha": "noHA", "gateway-mode": "local", "ipfamily": "dualstack", "disable-snat-multiple-gws": "noSnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones"} - {"target": "network-segmentation", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "dualstack", "disable-snat-multiple-gws": "SnatGW", "second-bridge": "1br", "ic": "ic-disabled"} + - {"target": "network-segmentation", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "ipv4", "disable-snat-multiple-gws": "noSnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones"} - {"target": "tools", "ha": "noHA", "gateway-mode": "local", 
"ipfamily": "dualstack", "disable-snat-multiple-gws": "SnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones"} needs: [ build-pr ] env: JOB_NAME: "${{ matrix.target }}-${{ matrix.ha }}-${{ matrix.gateway-mode }}-${{ matrix.ipfamily }}-${{ matrix.disable-snat-multiple-gws }}-${{ matrix.second-bridge }}-${{ matrix.ic }}" - OVN_HYBRID_OVERLAY_ENABLE: "${{ matrix.target == 'control-plane' || matrix.target == 'control-plane-helm' }}" - OVN_MULTICAST_ENABLE: "${{ matrix.target == 'control-plane' || matrix.target == 'control-plane-helm' }}" + OVN_HYBRID_OVERLAY_ENABLE: ${{ (matrix.target == 'control-plane' || matrix.target == 'control-plane-helm') && (matrix.ipfamily == 'ipv4' || matrix.ipfamily == 'dualstack' ) }} + OVN_MULTICAST_ENABLE: "${{ matrix.target == 'control-plane' || matrix.target == 'control-plane-helm' || matrix.target == 'network-segmentation' }}" OVN_EMPTY_LB_EVENTS: "${{ matrix.target == 'control-plane' || matrix.target == 'control-plane-helm' }}" OVN_HA: "${{ matrix.ha == 'HA' }}" OVN_DISABLE_SNAT_MULTIPLE_GWS: "${{ matrix.disable-snat-multiple-gws == 'noSnatGW' }}" @@ -451,7 +452,8 @@ jobs: KIND_IPV4_SUPPORT: "${{ matrix.ipfamily == 'IPv4' || matrix.ipfamily == 'dualstack' }}" KIND_IPV6_SUPPORT: "${{ matrix.ipfamily == 'IPv6' || matrix.ipfamily == 'dualstack' }}" ENABLE_MULTI_NET: "${{ matrix.target == 'multi-homing' || matrix.target == 'kv-live-migration' || matrix.target == 'network-segmentation' || matrix.target == 'tools' || matrix.target == 'multi-homing-helm' }}" - ENABLE_NETWORK_SEGMENTATION: "${{ matrix.target == 'network-segmentation' || matrix.target == 'tools'}}" + ENABLE_NETWORK_SEGMENTATION: "${{ matrix.target == 'network-segmentation' || matrix.target == 'tools' || matrix.target == 'kv-live-migration'}}" + DISABLE_UDN_HOST_ISOLATION: "true" KIND_INSTALL_KUBEVIRT: "${{ matrix.target == 'kv-live-migration' }}" OVN_COMPACT_MODE: "${{ matrix.target == 'compact-mode' }}" OVN_DUMMY_GATEWAY_BRIDGE: "${{ matrix.target == 'compact-mode' }}" @@ -531,7 +533,7 @@ jobs: run: | # used by e2e diagnostics package export OVN_IMAGE="ovn-daemonset-fedora:pr" - + if [ "${{ matrix.target }}" == "multi-homing" ] || [ "${{ matrix.target }}" == "multi-homing-helm" ]; then make -C test control-plane WHAT="Multi Homing" elif [ "${{ matrix.target }}" == "node-ip-mac-migration" ]; then @@ -617,6 +619,17 @@ jobs: echo "GOPATH=$GOPATH" >> $GITHUB_ENV echo "$GOPATH/bin" >> $GITHUB_PATH + - name: Free up disk space + run: | + sudo rm -rf /usr/local/lib/android/sdk + sudo apt-get update + sudo eatmydata apt-get purge --auto-remove -y \ + azure-cli aspnetcore-* dotnet-* ghc-* firefox \ + google-chrome-stable \ + llvm-* microsoft-edge-stable mono-* \ + msbuild mysql-server-core-* php-* php7* \ + powershell temurin-* zulu-* + - name: Disable ufw # For IPv6 and Dualstack, ufw (Uncomplicated Firewall) should be disabled. # Not needed for KIND deployments, so just disable all the time. 
diff --git a/.gitignore b/.gitignore index 341d99aa8c..513e0d3421 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,5 @@ contrib/bin ovn-kubernetes-anp-test-report.yaml + +**/ginkgo.report diff --git a/CODEOWNERS b/CODEOWNERS index 161a0e9e8c..fcd3dbc423 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1 +1 @@ -* @ovn-org/ovn-kubernetes-members +* @ovn-kubernetes/ovn-kubernetes-members diff --git a/Dockerfile b/Dockerfile index 43d0d12d7e..8bfaa6f9d9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -45,8 +45,8 @@ RUN INSTALL_PKGS=" \ ethtool conntrack-tools \ openshift-clients \ " && \ - dnf install -y --nodocs $INSTALL_PKGS && \ - eval "dnf install -y --nodocs $(cat /more-pkgs)" && \ + dnf --setopt=retries=2 --setopt=timeout=2 install -y --nodocs $INSTALL_PKGS && \ + eval "dnf --setopt=retries=2 --setopt=timeout=2 install -y --nodocs $(cat /more-pkgs)" && \ dnf clean all && rm -rf /var/cache/* COPY --from=builder /go/src/github.com/openshift/ovn-kubernetes/go-controller/_output/go/bin/ovnkube /usr/bin/ @@ -57,6 +57,7 @@ COPY --from=builder /go/src/github.com/openshift/ovn-kubernetes/go-controller/_o COPY --from=builder /go/src/github.com/openshift/ovn-kubernetes/go-controller/_output/go/bin/ovndbchecker /usr/bin/ COPY --from=builder /go/src/github.com/openshift/ovn-kubernetes/go-controller/_output/go/bin/ovnkube-trace /usr/bin/ COPY --from=builder /go/src/github.com/openshift/ovn-kubernetes/go-controller/_output/go/bin/hybrid-overlay-node /usr/bin/ +COPY --from=builder /go/src/github.com/openshift/ovn-kubernetes/go-controller/_output/go/bin/ovnkube-observ /usr/bin/ # Copy RHEL-8 and RHEL-9 shim binaries where the CNO's ovnkube-node container startup script can find them RUN mkdir -p /usr/libexec/cni/rhel9 diff --git a/Dockerfile.base b/Dockerfile.base index 7a4ad37979..6f5d0b6956 100644 --- a/Dockerfile.base +++ b/Dockerfile.base @@ -8,24 +8,26 @@ FROM registry.ci.openshift.org/ocp/4.17:base-rhel9 # install selinux-policy first to avoid a race -RUN dnf install -y --nodocs \ +RUN dnf --setopt=retries=2 --setopt=timeout=2 install -y --nodocs \ selinux-policy procps-ng && \ dnf clean all ARG ovsver=3.4.0-18.el9fdp -ARG ovnver=24.03.2-32.el9fdp +ARG ovnver=24.09.0-33.el9fdp # NOTE: Ensure that the versions of OVS and OVN are overridden for OKD in each of the subsequent layers. -ARG ovsver_okd=3.4.0-0.8.el9s -ARG ovnver_okd=24.03.1-5.el9s +# CentOS and RHEL releases for ovn are built out of sync, so please make sure to bump for OKD with +# the corresponding CentOS version when updating the OCP version. +ARG ovsver_okd=3.4.0-12.el9s +ARG ovnver_okd=24.09.0-41.el9s RUN INSTALL_PKGS="iptables nftables" && \ source /etc/os-release && \ [ "${ID}" == "centos" ] && ovsver=$ovsver_okd && ovnver=$ovnver_okd; \ ovsver_short=$(echo "$ovsver" | cut -d'.' -f1,2) && \ ovnver_short=$(echo "$ovnver" | cut -d'.'
-f1,2) && \ - dnf install -y --nodocs $INSTALL_PKGS && \ - dnf install -y --nodocs "openvswitch$ovsver_short = $ovsver" "python3-openvswitch$ovsver_short = $ovsver" && \ - dnf install -y --nodocs "ovn$ovnver_short = $ovnver" "ovn$ovnver_short-central = $ovnver" "ovn$ovnver_short-host = $ovnver" && \ + dnf --setopt=retries=2 --setopt=timeout=2 install -y --nodocs $INSTALL_PKGS && \ + dnf --setopt=retries=2 --setopt=timeout=2 install -y --nodocs "openvswitch$ovsver_short = $ovsver" "python3-openvswitch$ovsver_short = $ovsver" && \ + dnf --setopt=retries=2 --setopt=timeout=2 install -y --nodocs "ovn$ovnver_short = $ovnver" "ovn$ovnver_short-central = $ovnver" "ovn$ovnver_short-host = $ovnver" && \ dnf clean all && rm -rf /var/cache/* && \ sed 's/%/"/g' <<<"%openvswitch$ovsver_short-devel = $ovsver% %openvswitch$ovsver_short-ipsec = $ovsver% %ovn$ovnver_short-vtep = $ovnver%" > /more-pkgs diff --git a/Dockerfile.microshift b/Dockerfile.microshift index 9d02643e69..1064486dc1 100644 --- a/Dockerfile.microshift +++ b/Dockerfile.microshift @@ -12,7 +12,7 @@ # openvswitch-devel, openvswitch-ipsec, libpcap, iproute etc # ovn-kube-util, hybrid-overlay-node.exe, ovndbchecker and ovnkube-trace -FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.22-openshift-4.17 AS builder +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.22-openshift-4.18 AS builder WORKDIR /go/src/github.com/openshift/ovn-kubernetes COPY . . @@ -20,7 +20,7 @@ COPY . . # build the binaries RUN cd go-controller; CGO_ENABLED=0 make -FROM registry.ci.openshift.org/ocp/4.17:ovn-kubernetes-base +FROM registry.ci.openshift.org/ocp/4.18:ovn-kubernetes-base USER root diff --git a/README.md b/README.md index af532610f5..2ca002ac3f 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,7 @@ [![Go Report Card][go-report-card-badge]][go-report-url] [![Go Doc][go-doc-badge]][go-doc-url] [![Static Badge][slack-badge]][slack-url] +[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fovn-kubernetes%2Fovn-kubernetes.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fovn-kubernetes%2Fovn-kubernetes?ref=badge_shield) [apache2-badge]: https://img.shields.io/badge/License-Apache%202.0-blue.svg @@ -45,7 +46,10 @@ Here are some links to help in your ovn-kubernetes journey: Everything is distributed under the terms of the [Apache License] (version 2.0). + +[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fovn-kubernetes%2Fovn-kubernetes.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Fovn-kubernetes%2Fovn-kubernetes?ref=badge_large) + ## Who uses OVN-Kubernetes? See our [Adopters](ADOPTERS.md). If your organization or project uses OVN-Kubernetes, -please file a PR and update this list. Say hi on Slack too! +please file a PR and update this list. Say hi on Slack too! 
\ No newline at end of file diff --git a/contrib/kind-common b/contrib/kind-common index 0d49fdbe5d..ff96699030 100644 --- a/contrib/kind-common +++ b/contrib/kind-common @@ -122,12 +122,13 @@ install_ingress() { METALLB_DIR="/tmp/metallb" install_metallb() { + local metallb_version=v0.14.8 mkdir -p /tmp/metallb local builddir builddir=$(mktemp -d "${METALLB_DIR}/XXXXXX") pushd "${builddir}" - git clone https://github.com/metallb/metallb.git + git clone https://github.com/metallb/metallb.git -b $metallb_version cd metallb # Use global IP next hops in IPv6 if [ "$KIND_IPV6_SUPPORT" == true ]; then @@ -320,7 +321,13 @@ is_nested_virt_enabled() { } install_kubevirt() { - local kubevirt_version="$(curl -L https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirt/stable.txt)" + # possible values: + # stable - install newest stable (default) + # vX.Y.Z - install specific stable (e.g. v1.3.1) + # nightly - install newest nightly + # nightly tag - install specific nightly (e.g. 20240910) + KUBEVIRT_VERSION=${KUBEVIRT_VERSION:-"stable"} + for node in $(kubectl get node --no-headers -o custom-columns=":metadata.name"); do $OCI_BIN exec -t $node bash -c "echo 'fs.inotify.max_user_watches=1048576' >> /etc/sysctl.conf" $OCI_BIN exec -t $node bash -c "echo 'fs.inotify.max_user_instances=512' >> /etc/sysctl.conf" @@ -329,10 +336,10 @@ install_kubevirt() { kubectl label nodes $node node-role.kubernetes.io/worker="" --overwrite=true fi done - local kubevirt_release_url="https://github.com/kubevirt/kubevirt/releases/download/${kubevirt_version}" - echo "Deploy latest nighly build Kubevirt" if [ "$(kubectl get kubevirts -n kubevirt kubevirt -ojsonpath='{.status.phase}')" != "Deployed" ]; then + local kubevirt_release_url=$(get_kubevirt_release_url "$KUBEVIRT_VERSION") + echo "Deploying Kubevirt from $kubevirt_release_url" kubectl apply -f "${kubevirt_release_url}/kubevirt-operator.yaml" kubectl apply -f "${kubevirt_release_url}/kubevirt-cr.yaml" if ! is_nested_virt_enabled; then @@ -348,6 +355,12 @@ install_kubevirt() { kubectl logs --all-containers=true -n kubevirt $p || true done fi + + kubectl -n kubevirt patch kubevirt kubevirt --type=json --patch '[{"op":"add","path":"/spec/configuration/developerConfiguration","value":{"featureGates":[]}},{"op":"add","path":"/spec/configuration/developerConfiguration/featureGates/-","value":"NetworkBindingPlugins"},{"op":"add","path":"/spec/configuration/developerConfiguration/featureGates/-","value":"DynamicPodInterfaceNaming"}]' + + local kubevirt_stable_release_url=$(get_kubevirt_release_url "stable") + local passt_binding_image="quay.io/kubevirt/network-passt-binding:${kubevirt_stable_release_url##*/}" + kubectl -n kubevirt patch kubevirt kubevirt --type=json --patch '[{"op":"add","path":"/spec/configuration/network","value":{}},{"op":"add","path":"/spec/configuration/network/binding","value":{"passt":{"computeResourceOverhead":{"requests":{"memory":"500Mi"}},"migration":{"method":"link-refresh"},"networkAttachmentDefinition":"default/primary-udn-kubevirt-binding","sidecarImage":"'"${passt_binding_image}"'"},"managedTap":{"domainAttachmentType":"managedTap","migration":{}}}}]' if [ ! -d "./bin" ] then @@ -363,8 +376,9 @@ install_kubevirt() { pushd ./bin if [ !
-f ./virtctl ]; then - cli_name="virtctl-${kubevirt_version}-${OS_TYPE}-${ARCH}" - curl -LO "${kubevirt_release_url}/${cli_name}" + kubevirt_stable_release_url=$(get_kubevirt_release_url "stable") + cli_name="virtctl-${kubevirt_stable_release_url##*/}-${OS_TYPE}-${ARCH}" + curl -LO "${kubevirt_stable_release_url}/${cli_name}" mv ${cli_name} virtctl if_error_exit "Failed to download virtctl!" fi @@ -373,12 +387,14 @@ chmod +x ./bin/virtctl } -install_kubevirt_ipam_controller() { +install_cert_manager() { local cert_manager_version="v1.14.4" echo "Installing cert-manager ..." manifest="https://github.com/cert-manager/cert-manager/releases/download/${cert_manager_version}/cert-manager.yaml" run_kubectl apply -f "$manifest" +} +install_kubevirt_ipam_controller() { echo "Installing KubeVirt IPAM controller manager ..." manifest="https://raw.githubusercontent.com/kubevirt/ipam-extensions/main/dist/install.yaml" run_kubectl apply -f "$manifest" @@ -386,9 +402,11 @@ } install_multus() { - echo "Installing multus-cni daemonset ..." - multus_manifest="https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/master/deployments/multus-daemonset.yml" - run_kubectl apply -f "$multus_manifest" + local version="v4.1.3" + echo "Installing multus-cni $version daemonset ..." + wget -qO- "https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/${version}/deployments/multus-daemonset.yml" |\ + sed -e "s|multus-cni:snapshot|multus-cni:${version}|g" |\ + run_kubectl apply -f - } install_mpolicy_crd() { @@ -576,3 +594,116 @@ kubectl_wait_dnsnameresolver_pods() { echo "Waiting for pods in dnsnameresolver-operator namespace to become ready (timeout ${timeout})..." kubectl wait -n dnsnameresolver-operator --for=condition=ready pods --all --timeout=${timeout}s } + +[~116 added lines garbled in this excerpt: they define the helper functions that kind.sh invokes below (deploy_kubevirt_binding, deploy_passt_binary, deploy_frr_external_container, install_ffr_k8s); the legible tail writes receive_filtered.yaml, applies it with kubectl, then runs popd || exit 1 and rm -rf "${FRR_TMP_DIR}".] diff --git a/contrib/kind-helm.sh b/contrib/kind-helm.sh index 196ac37c31..a369094175 100755 --- a/contrib/kind-helm.sh +++ b/contrib/kind-helm.sh @@ -21,6 +21,7 @@ set_default_params() { export OVN_HA=${OVN_HA:-false} export OVN_MULTICAST_ENABLE=${OVN_MULTICAST_ENABLE:-false} export OVN_HYBRID_OVERLAY_ENABLE=${OVN_HYBRID_OVERLAY_ENABLE:-false} + export OVN_OBSERV_ENABLE=${OVN_OBSERV_ENABLE:-false} export OVN_EMPTY_LB_EVENTS=${OVN_EMPTY_LB_EVENTS:-false} export KIND_REMOVE_TAINT=${KIND_REMOVE_TAINT:-true} export ENABLE_MULTI_NET=${ENABLE_MULTI_NET:-false} @@ -106,6 +107,7 @@ usage() { echo " DEFAULT: Remove taint components" echo "-me | --multicast-enabled Enable multicast. DEFAULT: Disabled" echo "-ho | --hybrid-enabled Enable hybrid overlay. DEFAULT: Disabled" + echo "-obs | --observability Enable observability. DEFAULT: Disabled" echo "-el | --ovn-empty-lb-events Enable empty-lb-events generation for LB without backends. DEFAULT: Disabled" echo "-ii | --install-ingress Flag to install Ingress Components." echo " DEFAULT: Don't install ingress components."
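install_kubevirt above now resolves its manifests through get_kubevirt_release_url, whose definition falls inside the garbled hunk noted earlier. A hypothetical sketch of such a resolver, consistent with the documented KUBEVIRT_VERSION values and with the ${url##*/} tag extraction the callers rely on (the nightly bucket paths are assumptions; only the stable.txt URL actually appears in this diff):

```bash
# Hypothetical version->URL resolver; callers rely on the URL ending in the tag.
get_kubevirt_release_url() {
  local version="$1" tag
  case "$version" in
    stable)
      tag=$(curl -sL https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirt/stable.txt)
      echo "https://github.com/kubevirt/kubevirt/releases/download/${tag}"
      ;;
    v*)                     # specific stable release, e.g. v1.3.1
      echo "https://github.com/kubevirt/kubevirt/releases/download/${version}"
      ;;
    nightly)                # newest nightly (bucket layout assumed)
      tag=$(curl -sL https://storage.googleapis.com/kubevirt-prow/devel/nightly/release/kubevirt/kubevirt/latest)
      echo "https://storage.googleapis.com/kubevirt-prow/devel/nightly/release/kubevirt/kubevirt/${tag}"
      ;;
    *)                      # specific nightly tag, e.g. 20240910
      echo "https://storage.googleapis.com/kubevirt-prow/devel/nightly/release/kubevirt/kubevirt/${version}"
      ;;
  esac
}
```

Because each URL ends in the tag, ${kubevirt_stable_release_url##*/} recovers the bare version, which is exactly how the virtctl download name and the network-passt-binding image tag are built above.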
@@ -143,6 +145,8 @@ parse_args() { ;; -ho | --hybrid-enabled ) OVN_HYBRID_OVERLAY_ENABLE=true ;; + -obs | --observability ) OVN_OBSERV_ENABLE=true + ;; -el | --ovn-empty-lb-events ) OVN_EMPTY_LB_EVENTS=true ;; -ii | --install-ingress ) KIND_INSTALL_INGRESS=true @@ -202,6 +206,7 @@ print_params() { echo "OVN_HA = $OVN_HA" echo "OVN_MULTICAST_ENABLE = $OVN_MULTICAST_ENABLE" echo "OVN_HYBRID_OVERLAY_ENABLE = $OVN_HYBRID_OVERLAY_ENABLE" + echo "OVN_OBSERV_ENABLE = $OVN_OBSERV_ENABLE" echo "OVN_EMPTY_LB_EVENTS = $OVN_EMPTY_LB_EVENTS" echo "KIND_CLUSTER_NAME = $KIND_CLUSTER_NAME" echo "KIND_REMOVE_TAINT = $KIND_REMOVE_TAINT" @@ -398,7 +403,8 @@ create_ovn_kubernetes() { --set global.enableMulticast=$(if [ "${OVN_MULTICAST_ENABLE}" == "true" ]; then echo "true"; else echo "false"; fi) \ --set global.enableMultiNetwork=$(if [ "${ENABLE_MULTI_NET}" == "true" ]; then echo "true"; else echo "false"; fi) \ --set global.enableHybridOverlay=$(if [ "${OVN_HYBRID_OVERLAY_ENABLE}" == "true" ]; then echo "true"; else echo "false"; fi) \ - --set global.emptyLbEvents=$(if [ "${OVN_EMPTY_LB_EVENTS}" == "true" ]; then echo "true"; else echo "false"; fi) \ + --set global.enableObservability=$(if [ "${OVN_OBSERV_ENABLE}" == "true" ]; then echo "true"; else echo "false"; fi) \ + --set global.emptyLbEvents=$(if [ "${OVN_EMPTY_LB_EVENTS}" == "true" ]; then echo "true"; else echo "false"; fi) \ --set global.enableDNSNameResolver=$(if [ "${OVN_ENABLE_DNSNAMERESOLVER}" == "true" ]; then echo "true"; else echo "false"; fi) \ ${ovnkube_db_options} } diff --git a/contrib/kind.sh b/contrib/kind.sh index 469e1191c5..6a997d10c9 100755 --- a/contrib/kind.sh +++ b/contrib/kind.sh @@ -77,70 +77,74 @@ usage() { echo " [-is | --ipsec]" echo " [-cm | --compact-mode]" echo " [-ic | --enable-interconnect]" + echo " [-rae | --enable-route-advertisements]" echo " [--isolated]" echo " [-dns | --enable-dnsnameresolver]" + echo " [-obs | --observability]" echo " [-h]]" echo "" - echo "-cf | --config-file Name of the KIND J2 configuration file." - echo " DEFAULT: ./kind.yaml.j2" - echo "-kt | --keep-taint Do not remove taint components." - echo " DEFAULT: Remove taint components." - echo "-ha | --ha-enabled Enable high availability. DEFAULT: HA Disabled." - echo "-scm | --separate-cluster-manager Separate cluster manager from ovnkube-master and run as a separate container within ovnkube-master deployment." - echo "-me | --multicast-enabled Enable multicast. DEFAULT: Disabled." - echo "-ho | --hybrid-enabled Enable hybrid overlay. DEFAULT: Disabled." - echo "-ds | --disable-snat-multiple-gws Disable SNAT for multiple gws. DEFAULT: Disabled." - echo "-dp | --disable-pkt-mtu-check Disable checking packet size greater than MTU. Default: Disabled" - echo "-df | --disable-forwarding Disable forwarding on OVNK managed interfaces. Default: Disabled" - echo "-ecp | --encap-port UDP port used for geneve overlay. DEFAULT: 6081" - echo "-pl | --install-cni-plugins ] Installs additional CNI network plugins. DEFAULT: Disabled" - echo "-nf | --netflow-targets Comma delimited list of ip:port or :port (using node IP) netflow collectors. DEFAULT: Disabled." - echo "-sf | --sflow-targets Comma delimited list of ip:port or :port (using node IP) sflow collectors. DEFAULT: Disabled." - echo "-if | --ipfix-targets Comma delimited list of ip:port or :port (using node IP) ipfix collectors. DEFAULT: Disabled." - echo "-ifs | --ipfix-sampling Fraction of packets that are sampled and sent to each target collector: 1 packet out of every . 
DEFAULT: 400 (1 out of 400 packets)." - echo "-ifm | --ipfix-cache-max-flows Maximum number of IPFIX flow records that can be cached at a time. If 0, caching is disabled. DEFAULT: Disabled." - echo "-ifa | --ipfix-cache-active-timeout Maximum period in seconds for which an IPFIX flow record is cached and aggregated before being sent. If 0, caching is disabled. DEFAULT: 60." - echo "-el | --ovn-empty-lb-events Enable empty-lb-events generation for LB without backends. DEFAULT: Disabled" - echo "-ii | --install-ingress Flag to install Ingress Components." - echo " DEFAULT: Don't install ingress components." - echo "-mlb | --install-metallb Install metallb to test service type LoadBalancer deployments" - echo "-n4 | --no-ipv4 Disable IPv4. DEFAULT: IPv4 Enabled." - echo "-i6 | --ipv6 Enable IPv6. DEFAULT: IPv6 Disabled." - echo "-wk | --num-workers Number of worker nodes. DEFAULT: HA - 2 worker" - echo " nodes and no HA - 0 worker nodes." - echo "-sw | --allow-system-writes Allow script to update system. Intended to allow" - echo " github CI to be updated with IPv6 settings." - echo " DEFAULT: Don't allow." - echo "-gm | --gateway-mode Enable 'shared' or 'local' gateway mode." - echo " DEFAULT: shared." - echo "-ov | --ovn-image Use the specified docker image instead of building locally. DEFAULT: local build." - echo "-ml | --master-loglevel Log level for ovnkube (master), DEFAULT: 5." - echo "-nl | --node-loglevel Log level for ovnkube (node), DEFAULT: 5" - echo "-dbl | --dbchecker-loglevel Log level for ovn-dbchecker (ovnkube-db), DEFAULT: 5." - echo "-ndl | --ovn-loglevel-northd Log config for ovn northd, DEFAULT: '-vconsole:info -vfile:info'." - echo "-nbl | --ovn-loglevel-nb Log config for northbound DB DEFAULT: '-vconsole:info -vfile:info'." - echo "-sbl | --ovn-loglevel-sb Log config for southboudn DB DEFAULT: '-vconsole:info -vfile:info'." - echo "-cl | --ovn-loglevel-controller Log config for ovn-controller DEFAULT: '-vconsole:info'." - echo "-lcl | --libovsdb-client-logfile Separate logs for libovsdb client into provided file. DEFAULT: do not separate." - echo "-ep | --experimental-provider Use an experimental OCI provider such as podman, instead of docker. DEFAULT: Disabled." - echo "-eb | --egress-gw-separate-bridge The external gateway traffic uses a separate bridge." - echo "-lr | --local-kind-registry Configure kind to use a local docker registry rather than manually loading images" - echo "-dd | --dns-domain Configure a custom dnsDomain for k8s services, Defaults to 'cluster.local'" - echo "-cn | --cluster-name Configure the kind cluster's name" - echo "-ric | --run-in-container Configure the script to be run from a docker container, allowing it to still communicate with the kind controlplane" - echo "-ehp | --egress-ip-healthcheck-port TCP port used for gRPC session by egress IP node check. DEFAULT: 9107 (Use "0" for legacy dial to port 9)." - echo "-is | --ipsec Enable IPsec encryption (spawns ovn-ipsec pods)" - echo "-sm | --scale-metrics Enable scale metrics" - echo "-cm | --compact-mode Enable compact mode, ovnkube master and node run in the same process." - echo "-ic | --enable-interconnect Enable interconnect with each node as a zone (only valid if OVN_HA is false)" - echo "--disable-ovnkube-identity Disable per-node cert and ovnkube-identity webhook" - echo "-npz | --nodes-per-zone If interconnect is enabled, number of nodes per zone (Default 1). If this value > 1, then (total k8s nodes (workers + 1) / num of nodes per zone) should be zero." 
- echo "-mtu Define the overlay mtu" - echo "--isolated Deploy with an isolated environment (no default gateway)" - echo "--delete Delete current cluster" - echo "--deploy Deploy ovn kubernetes without restarting kind" - echo "--add-nodes Adds nodes to an existing cluster. The number of nodes to be added is specified by --num-workers. Also use -ic if the cluster is using interconnect." - echo "-dns | --enable-dnsnameresolver Enable DNSNameResolver for resolving the DNS names used in the DNS rules of EgressFirewall." + echo "-cf | --config-file Name of the KIND J2 configuration file." + echo " DEFAULT: ./kind.yaml.j2" + echo "-kt | --keep-taint Do not remove taint components." + echo " DEFAULT: Remove taint components." + echo "-ha | --ha-enabled Enable high availability. DEFAULT: HA Disabled." + echo "-scm | --separate-cluster-manager Separate cluster manager from ovnkube-master and run as a separate container within ovnkube-master deployment." + echo "-me | --multicast-enabled Enable multicast. DEFAULT: Disabled." + echo "-ho | --hybrid-enabled Enable hybrid overlay. DEFAULT: Disabled." + echo "-ds | --disable-snat-multiple-gws Disable SNAT for multiple gws. DEFAULT: Disabled." + echo "-dp | --disable-pkt-mtu-check Disable checking packet size greater than MTU. Default: Disabled" + echo "-df | --disable-forwarding Disable forwarding on OVNK managed interfaces. Default: Disabled" + echo "-ecp | --encap-port UDP port used for geneve overlay. DEFAULT: 6081" + echo "-pl | --install-cni-plugins ] Installs additional CNI network plugins. DEFAULT: Disabled" + echo "-nf | --netflow-targets Comma delimited list of ip:port or :port (using node IP) netflow collectors. DEFAULT: Disabled." + echo "-sf | --sflow-targets Comma delimited list of ip:port or :port (using node IP) sflow collectors. DEFAULT: Disabled." + echo "-if | --ipfix-targets Comma delimited list of ip:port or :port (using node IP) ipfix collectors. DEFAULT: Disabled." + echo "-ifs | --ipfix-sampling Fraction of packets that are sampled and sent to each target collector: 1 packet out of every . DEFAULT: 400 (1 out of 400 packets)." + echo "-ifm | --ipfix-cache-max-flows Maximum number of IPFIX flow records that can be cached at a time. If 0, caching is disabled. DEFAULT: Disabled." + echo "-ifa | --ipfix-cache-active-timeout Maximum period in seconds for which an IPFIX flow record is cached and aggregated before being sent. If 0, caching is disabled. DEFAULT: 60." + echo "-el | --ovn-empty-lb-events Enable empty-lb-events generation for LB without backends. DEFAULT: Disabled" + echo "-ii | --install-ingress Flag to install Ingress Components." + echo " DEFAULT: Don't install ingress components." + echo "-mlb | --install-metallb Install metallb to test service type LoadBalancer deployments" + echo "-n4 | --no-ipv4 Disable IPv4. DEFAULT: IPv4 Enabled." + echo "-i6 | --ipv6 Enable IPv6. DEFAULT: IPv6 Disabled." + echo "-wk | --num-workers Number of worker nodes. DEFAULT: HA - 2 worker" + echo " nodes and no HA - 0 worker nodes." + echo "-sw | --allow-system-writes Allow script to update system. Intended to allow" + echo " github CI to be updated with IPv6 settings." + echo " DEFAULT: Don't allow." + echo "-gm | --gateway-mode Enable 'shared' or 'local' gateway mode." + echo " DEFAULT: shared." + echo "-ov | --ovn-image Use the specified docker image instead of building locally. DEFAULT: local build." + echo "-ml | --master-loglevel Log level for ovnkube (master), DEFAULT: 5." 
+ echo "-nl | --node-loglevel Log level for ovnkube (node), DEFAULT: 5" + echo "-dbl | --dbchecker-loglevel Log level for ovn-dbchecker (ovnkube-db), DEFAULT: 5." + echo "-ndl | --ovn-loglevel-northd Log config for ovn northd, DEFAULT: '-vconsole:info -vfile:info'." + echo "-nbl | --ovn-loglevel-nb Log config for northbound DB DEFAULT: '-vconsole:info -vfile:info'." + echo "-sbl | --ovn-loglevel-sb Log config for southboudn DB DEFAULT: '-vconsole:info -vfile:info'." + echo "-cl | --ovn-loglevel-controller Log config for ovn-controller DEFAULT: '-vconsole:info'." + echo "-lcl | --libovsdb-client-logfile Separate logs for libovsdb client into provided file. DEFAULT: do not separate." + echo "-ep | --experimental-provider Use an experimental OCI provider such as podman, instead of docker. DEFAULT: Disabled." + echo "-eb | --egress-gw-separate-bridge The external gateway traffic uses a separate bridge." + echo "-lr | --local-kind-registry Configure kind to use a local docker registry rather than manually loading images" + echo "-dd | --dns-domain Configure a custom dnsDomain for k8s services, Defaults to 'cluster.local'" + echo "-cn | --cluster-name Configure the kind cluster's name" + echo "-ric | --run-in-container Configure the script to be run from a docker container, allowing it to still communicate with the kind controlplane" + echo "-ehp | --egress-ip-healthcheck-port TCP port used for gRPC session by egress IP node check. DEFAULT: 9107 (Use "0" for legacy dial to port 9)." + echo "-is | --ipsec Enable IPsec encryption (spawns ovn-ipsec pods)" + echo "-sm | --scale-metrics Enable scale metrics" + echo "-cm | --compact-mode Enable compact mode, ovnkube master and node run in the same process." + echo "-ic | --enable-interconnect Enable interconnect with each node as a zone (only valid if OVN_HA is false)" + echo "--disable-ovnkube-identity Disable per-node cert and ovnkube-identity webhook" + echo "-npz | --nodes-per-zone If interconnect is enabled, number of nodes per zone (Default 1). If this value > 1, then (total k8s nodes (workers + 1) / num of nodes per zone) should be zero." + echo "-mtu Define the overlay mtu" + echo "--isolated Deploy with an isolated environment (no default gateway)" + echo "--delete Delete current cluster" + echo "--deploy Deploy ovn kubernetes without restarting kind" + echo "--add-nodes Adds nodes to an existing cluster. The number of nodes to be added is specified by --num-workers. Also use -ic if the cluster is using interconnect." + echo "-dns | --enable-dnsnameresolver Enable DNSNameResolver for resolving the DNS names used in the DNS rules of EgressFirewall." + echo "-obs | --observability Enable OVN Observability feature." 
+ echo "-rae | --enable-route-advertisements Enable route advertisements" echo "" } @@ -163,6 +167,8 @@ parse_args() { ;; -ikv | --install-kubevirt) KIND_INSTALL_KUBEVIRT=true ;; + -nokvipam | --opt-out-kv-ipam) KIND_OPT_OUT_KUBEVIRT_IPAM=true + ;; -ha | --ha-enabled ) OVN_HA=true ;; -me | --multicast-enabled) OVN_MULTICAST_ENABLE=true @@ -199,6 +205,8 @@ parse_args() { -ifa | --ipfix-cache-active-timeout ) shift OVN_IPFIX_CACHE_ACTIVE_TIMEOUT=$1 ;; + -obs | --observability ) OVN_OBSERV_ENABLE=true + ;; -el | --ovn-empty-lb-events ) OVN_EMPTY_LB_EVENTS=true ;; -kt | --keep-taint ) KIND_REMOVE_TAINT=false @@ -314,6 +322,8 @@ parse_args() { ;; -nse | --network-segmentation-enable) ENABLE_NETWORK_SEGMENTATION=true ;; + -rae | --route-advertisements-enable) ENABLE_ROUTE_ADVERTISEMENTS=true + ;; -ic | --enable-interconnect ) OVN_ENABLE_INTERCONNECT=true ;; --disable-ovnkube-identity) OVN_ENABLE_OVNKUBE_IDENTITY=false @@ -334,7 +344,8 @@ parse_args() { -h | --help ) usage exit ;; - * ) usage + * ) echo "Invalid option: $1" + usage exit 1 esac shift @@ -350,6 +361,7 @@ print_params() { echo "KIND_INSTALL_METALLB = $KIND_INSTALL_METALLB" echo "KIND_INSTALL_PLUGINS = $KIND_INSTALL_PLUGINS" echo "KIND_INSTALL_KUBEVIRT = $KIND_INSTALL_KUBEVIRT" + echo "KIND_OPT_OUT_KUBEVIRT_IPAM = $KIND_OPT_OUT_KUBEVIRT_IPAM" echo "OVN_HA = $OVN_HA" echo "RUN_IN_CONTAINER = $RUN_IN_CONTAINER" echo "KIND_CLUSTER_NAME = $KIND_CLUSTER_NAME" @@ -377,6 +389,7 @@ print_params() { echo "OVN_IPFIX_SAMPLING = $OVN_IPFIX_SAMPLING" echo "OVN_IPFIX_CACHE_MAX_FLOWS = $OVN_IPFIX_CACHE_MAX_FLOWS" echo "OVN_IPFIX_CACHE_ACTIVE_TIMEOUT = $OVN_IPFIX_CACHE_ACTIVE_TIMEOUT" + echo "OVN_OBSERV_ENABLE = $OVN_OBSERV_ENABLE" echo "OVN_EMPTY_LB_EVENTS = $OVN_EMPTY_LB_EVENTS" echo "OVN_MULTICAST_ENABLE = $OVN_MULTICAST_ENABLE" echo "OVN_IMAGE = $OVN_IMAGE" @@ -398,6 +411,7 @@ print_params() { echo "OVN_ISOLATED = $OVN_ISOLATED" echo "ENABLE_MULTI_NET = $ENABLE_MULTI_NET" echo "ENABLE_NETWORK_SEGMENTATION= $ENABLE_NETWORK_SEGMENTATION" + echo "ENABLE_ROUTE_ADVERTISEMENTS= $ENABLE_ROUTE_ADVERTISEMENTS" echo "OVN_ENABLE_INTERCONNECT = $OVN_ENABLE_INTERCONNECT" if [ "$OVN_ENABLE_INTERCONNECT" == true ]; then echo "KIND_NUM_NODES_PER_ZONE = $KIND_NUM_NODES_PER_ZONE" @@ -497,12 +511,13 @@ set_default_params() { fi RUN_IN_CONTAINER=${RUN_IN_CONTAINER:-false} KIND_IMAGE=${KIND_IMAGE:-kindest/node} - K8S_VERSION=${K8S_VERSION:-v1.30.2} + K8S_VERSION=${K8S_VERSION:-v1.31.1} OVN_GATEWAY_MODE=${OVN_GATEWAY_MODE:-shared} KIND_INSTALL_INGRESS=${KIND_INSTALL_INGRESS:-false} KIND_INSTALL_METALLB=${KIND_INSTALL_METALLB:-false} KIND_INSTALL_PLUGINS=${KIND_INSTALL_PLUGINS:-false} KIND_INSTALL_KUBEVIRT=${KIND_INSTALL_KUBEVIRT:-false} + KIND_OPT_OUT_KUBEVIRT_IPAM=${KIND_OPT_OUT_KUBEVIRT_IPAM:-false} OVN_HA=${OVN_HA:-false} KIND_LOCAL_REGISTRY=${KIND_LOCAL_REGISTRY:-false} KIND_LOCAL_REGISTRY_NAME=${KIND_LOCAL_REGISTRY_NAME:-kind-registry} @@ -598,12 +613,18 @@ set_default_params() { fi ENABLE_MULTI_NET=${ENABLE_MULTI_NET:-false} ENABLE_NETWORK_SEGMENTATION=${ENABLE_NETWORK_SEGMENTATION:-false} + ENABLE_ROUTE_ADVERTISEMENTS=${ENABLE_ROUTE_ADVERTISEMENTS:-false} + if [ "$ENABLE_ROUTE_ADVERTISEMENTS" == true ] && [ "$ENABLE_MULTI_NET" != true ]; then + echo "Route advertisements requires multi-network to be enabled (-mne)" + exit 1 + fi OVN_COMPACT_MODE=${OVN_COMPACT_MODE:-false} if [ "$OVN_COMPACT_MODE" == true ]; then KIND_NUM_WORKER=0 fi OVN_MTU=${OVN_MTU:-1400} OVN_ENABLE_DNSNAMERESOLVER=${OVN_ENABLE_DNSNAMERESOLVER:-false} + 
OVN_OBSERV_ENABLE=${OVN_OBSERV_ENABLE:-false} } check_ipv6() { @@ -737,7 +758,7 @@ create_kind_cluster() { jinjanate "${KIND_CONFIG}" -o "${KIND_CONFIG_LCL}" # Create KIND cluster. For additional debug, add '--verbosity ': 0 None .. 3 Debug - if kind get clusters | grep ovn; then + if kind get clusters | grep "${KIND_CLUSTER_NAME}"; then delete fi @@ -844,6 +865,7 @@ create_ovn_kube_manifests() { --ex-gw-network-interface="${OVN_EX_GW_NETWORK_INTERFACE}" \ --multi-network-enable="${ENABLE_MULTI_NET}" \ --network-segmentation-enable="${ENABLE_NETWORK_SEGMENTATION}" \ + --route-advertisements-enable="${ENABLE_ROUTE_ADVERTISEMENTS}" \ --ovnkube-metrics-scale-enable="${OVN_METRICS_SCALE_ENABLE}" \ --compact-mode="${OVN_COMPACT_MODE}" \ --enable-interconnect="${OVN_ENABLE_INTERCONNECT}" \ @@ -851,7 +873,8 @@ --enable-ovnkube-identity="${OVN_ENABLE_OVNKUBE_IDENTITY}" \ --enable-persistent-ips=true \ --mtu="${OVN_MTU}" \ - --enable-dnsnameresolver="${OVN_ENABLE_DNSNAMERESOLVER}" + --enable-dnsnameresolver="${OVN_ENABLE_DNSNAMERESOLVER}" \ + --enable-observ="${OVN_OBSERV_ENABLE}" popd } @@ -888,6 +912,7 @@ install_ovn_single_node_zones() { fi run_kubectl apply -f ovnkube-control-plane.yaml run_kubectl apply -f ovnkube-single-node-zone.yaml + kubectl patch ds -n ovn-kubernetes ovnkube-node --type='json' -p='[{"op": "add", "path": "/spec/updateStrategy/rollingUpdate", "value": {"maxUnavailable": "100%"}}]' } label_ovn_multiple_nodes_zones() { @@ -932,6 +957,8 @@ install_ovn() { run_kubectl apply -f k8s.ovn.org_egressservices.yaml run_kubectl apply -f k8s.ovn.org_adminpolicybasedexternalroutes.yaml run_kubectl apply -f k8s.ovn.org_userdefinednetworks.yaml + run_kubectl apply -f k8s.ovn.org_clusteruserdefinednetworks.yaml + run_kubectl apply -f k8s.ovn.org_routeadvertisements.yaml # NOTE: When you update vendoring versions for the ANP & BANP APIs, we must update the version of the CRD we pull from in the below URL run_kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/network-policy-api/v0.1.5/config/crd/experimental/policy.networking.k8s.io_adminnetworkpolicies.yaml run_kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/network-policy-api/v0.1.5/config/crd/experimental/policy.networking.k8s.io_baselineadminnetworkpolicies.yaml @@ -1133,6 +1160,9 @@ if [ "$OVN_ENABLE_DNSNAMERESOLVER" == true ]; then add_ocp_dnsnameresolver_to_coredns_config update_coredns_deployment_image fi +if [ "$ENABLE_ROUTE_ADVERTISEMENTS" == true ]; then + deploy_frr_external_container +fi build_ovn_image detect_apiserver_url create_ovn_kube_manifests @@ -1163,5 +1193,14 @@ if [ "$KIND_INSTALL_PLUGINS" == true ]; then fi if [ "$KIND_INSTALL_KUBEVIRT" == true ]; then install_kubevirt - install_kubevirt_ipam_controller + deploy_kubevirt_binding + deploy_passt_binary + + install_cert_manager + if [ "$KIND_OPT_OUT_KUBEVIRT_IPAM" != true ]; then + install_kubevirt_ipam_controller + fi +fi +if [ "$ENABLE_ROUTE_ADVERTISEMENTS" == true ]; then + install_ffr_k8s fi diff --git a/contrib/kind.yaml.j2 b/contrib/kind.yaml.j2 index d6d95752d5..ab1711fead 100644 --- a/contrib/kind.yaml.j2 +++ b/contrib/kind.yaml.j2 @@ -31,6 +31,12 @@ kubeadmConfigPatches: controllerManager: extraArgs: "v": "{{ cluster_log_level }}" + # Disable service-lb-controller for now + # https://github.com/kubernetes/kubernetes/issues/128121 + # Once the upstream issue is fixed we can remove this controller + # customization fully.
Tracked with + # https://github.com/ovn-org/ovn-kubernetes/issues/4785 + "controllers": "*,bootstrap-signer-controller,token-cleaner-controller,-service-lb-controller" scheduler: extraArgs: "v": "{{ cluster_log_level }}" diff --git a/dist/images/.gitignore b/dist/images/.gitignore index 5df1690294..ebb12cdee1 100644 --- a/dist/images/.gitignore +++ b/dist/images/.gitignore @@ -3,6 +3,7 @@ ovn-kube-util ovnkube ovnkube-trace ovnkube-identity +ovnkube-observ ovndbchecker hybrid-overlay-node git_info diff --git a/dist/images/Dockerfile.fedora b/dist/images/Dockerfile.fedora index 7dd10b1bc0..858f6f9d88 100644 --- a/dist/images/Dockerfile.fedora +++ b/dist/images/Dockerfile.fedora @@ -9,13 +9,13 @@ # are built locally and included in the image (instead of the rpm) # -FROM fedora:39 +FROM fedora:41 USER root ENV PYTHONDONTWRITEBYTECODE yes -ARG ovnver=ovn-24.03.2-19.fc39 +ARG ovnver=ovn-24.09.0-33.fc41 # Automatically populated when using docker buildx ARG TARGETPLATFORM ARG BUILDPLATFORM @@ -24,27 +24,27 @@ RUN echo "Running on $BUILDPLATFORM, building for $TARGETPLATFORM" # install needed rpms - openvswitch must be 2.10.4 or higher RUN INSTALL_PKGS=" \ - python3-pip python3-pyyaml bind-utils procps-ng openssl numactl-libs firewalld-filesystem \ - libpcap hostname kubernetes-client util-linux \ - ovn ovn-central ovn-host python3-openvswitch tcpdump openvswitch-test python3-pyOpenSSL \ - iptables iproute iputils strace socat koji \ - libreswan openvswitch-ipsec \ - " && \ - dnf install --best --refresh -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ - dnf clean all && rm -rf /var/cache/dnf/* + python3-pip python3-pyyaml bind-utils procps-ng openssl numactl-libs firewalld-filesystem \ + libpcap hostname kubernetes-client util-linux \ + ovn ovn-central ovn-host python3-openvswitch tcpdump openvswitch-test python3-pyOpenSSL \ + iptables nftables iproute iputils strace socat koji \ + libreswan openvswitch-ipsec \ + " && \ + dnf install --best --refresh -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ + dnf clean all && rm -rf /var/cache/dnf/* RUN ln -s /usr/bin/python3 /usr/libexec/platform-python RUN mkdir -p /var/run/openvswitch RUN if [ "$TARGETPLATFORM" = "linux/amd64" ] || [ -z "$TARGETPLATFORM"] ; then koji download-build $ovnver --arch=x86_64 ; \ - else koji download-build $ovnver --arch=aarch64 ; fi + else koji download-build $ovnver --arch=aarch64 ; fi RUN rpm -Uhv --nodeps --force *.rpm # Built in ../../go_controller, then the binaries are copied here. # put things where they are in the pkg RUN mkdir -p /usr/libexec/cni/ -COPY ovnkube ovn-kube-util ovndbchecker hybrid-overlay-node ovnkube-identity /usr/bin/ +COPY ovnkube ovn-kube-util ovndbchecker hybrid-overlay-node ovnkube-identity ovnkube-observ /usr/bin/ COPY ovn-k8s-cni-overlay /usr/libexec/cni/ovn-k8s-cni-overlay # ovnkube.sh is the entry point. This script examines environment diff --git a/dist/images/Dockerfile.fedora.dev b/dist/images/Dockerfile.fedora.dev index 6e9ec9c71f..e0f67ec8d3 100644 --- a/dist/images/Dockerfile.fedora.dev +++ b/dist/images/Dockerfile.fedora.dev @@ -64,7 +64,7 @@ FROM fedora:39 # Install needed dependencies. 
RUN INSTALL_PKGS=" \ - iptables iproute iputils hostname unbound-libs kubernetes-client kmod" && \ + iptables nftables iproute iputils hostname unbound-libs kubernetes-client kmod" && \ dnf install --best --refresh -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ dnf clean all && rm -rf /var/cache/dnf/* @@ -83,6 +83,7 @@ COPY ovn-kube-util /usr/bin/ COPY ovndbchecker /usr/bin/ COPY hybrid-overlay-node /usr/bin COPY ovnkube-identity /usr/bin/ +COPY ovnkube-observ /usr/bin COPY ovn-k8s-cni-overlay /usr/libexec/cni/ovn-k8s-cni-overlay # ovnkube.sh is the entry point. This script examines environment diff --git a/dist/images/Dockerfile.ubuntu b/dist/images/Dockerfile.ubuntu index 684ce2c040..10addc57d4 100644 --- a/dist/images/Dockerfile.ubuntu +++ b/dist/images/Dockerfile.ubuntu @@ -8,11 +8,11 @@ # # So this file will change over time. -FROM ubuntu:24.04 +FROM ubuntu:24.10 USER root -RUN apt-get update && apt-get install -y iproute2 curl software-properties-common util-linux +RUN apt-get update && apt-get install -y iproute2 curl software-properties-common util-linux nftables RUN curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - @@ -27,7 +27,7 @@ RUN mkdir -p /var/run/openvswitch # Built in ../../go_controller, then the binaries are copied here. # put things where they are in the pkg RUN mkdir -p /usr/libexec/cni/ -COPY ovnkube ovn-kube-util ovndbchecker hybrid-overlay-node ovnkube-identity /usr/bin/ +COPY ovnkube ovn-kube-util ovndbchecker hybrid-overlay-node ovnkube-identity ovnkube-observ /usr/bin/ COPY ovn-k8s-cni-overlay /usr/libexec/cni/ovn-k8s-cni-overlay # ovnkube.sh is the entry point. This script examines environment diff --git a/dist/images/Dockerfile.ubuntu.arm64 b/dist/images/Dockerfile.ubuntu.arm64 new file mode 100644 index 0000000000..b33df55add --- /dev/null +++ b/dist/images/Dockerfile.ubuntu.arm64 @@ -0,0 +1,55 @@ +# +# The standard name for this image is ovn-kube-ubuntu + +# Notes: +# This is for a development build where the ovn-kubernetes utilities +# are built in this Dockerfile and included in the image (instead of the deb package) +# +# +# So this file will change over time. + +FROM ubuntu:24.10 + +USER root + +RUN apt-get update && apt-get install -y iproute2 curl software-properties-common util-linux + +RUN curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - + +# Install OVS and OVN packages. +RUN apt-get update && apt-get install -y openvswitch-switch openvswitch-common ovn-central ovn-common ovn-host + +RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" \ + && install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + +RUN mkdir -p /var/run/openvswitch + +# Built in ../../go_controller, then the binaries are copied here. +# put things where they are in the pkg +RUN mkdir -p /usr/libexec/cni/ +COPY ovnkube ovn-kube-util ovndbchecker hybrid-overlay-node ovnkube-identity ovnkube-observ /usr/bin/ +COPY ovn-k8s-cni-overlay /usr/libexec/cni/ovn-k8s-cni-overlay + +# ovnkube.sh is the entry point. 
This script examines environment +# variables to direct operation and configure ovn +COPY ovnkube.sh /root/ +COPY ovndb-raft-functions.sh /root/ +# override the pkg's ovn_k8s.conf with this local copy +COPY ovn_k8s.conf /etc/openvswitch/ovn_k8s.conf + +# copy git commit number into image +COPY git_info /root + +# iptables wrappers +COPY ./iptables-scripts/iptables /usr/sbin/ +COPY ./iptables-scripts/iptables-save /usr/sbin/ +COPY ./iptables-scripts/iptables-restore /usr/sbin/ +COPY ./iptables-scripts/ip6tables /usr/sbin/ +COPY ./iptables-scripts/ip6tables-save /usr/sbin/ +COPY ./iptables-scripts/ip6tables-restore /usr/sbin/ + +LABEL io.k8s.display-name="ovn-kubernetes" \ + io.k8s.description="ovnkube ubuntu image" + +WORKDIR /root +ENTRYPOINT /root/ovnkube.sh diff --git a/dist/images/daemonset.sh b/dist/images/daemonset.sh index c5ac11acc8..baa6d631c9 100755 --- a/dist/images/daemonset.sh +++ b/dist/images/daemonset.sh @@ -71,6 +71,7 @@ OVN_EGRESSSERVICE_ENABLE= OVN_DISABLE_OVN_IFACE_ID_VER="false" OVN_MULTI_NETWORK_ENABLE= OVN_NETWORK_SEGMENTATION_ENABLE= +OVN_ROUTE_ADVERTISEMENTS_ENABLE= OVN_V4_JOIN_SUBNET="" OVN_V6_JOIN_SUBNET="" OVN_V4_MASQUERADE_SUBNET="" @@ -94,11 +95,14 @@ OVN_ENABLE_OVNKUBE_IDENTITY="true" OVN_ENABLE_PERSISTENT_IPS= OVN_ENABLE_SVC_TEMPLATE_SUPPORT="true" OVN_ENABLE_DNSNAMERESOLVER="false" +OVN_NOHOSTSUBNET_LABEL="" +OVN_DISABLE_REQUESTEDCHASSIS="false" # IN_UPGRADE is true only if called by upgrade-ovn.sh during the upgrade test, # it will render only the parts in ovn-setup.yaml related to RBAC permissions. IN_UPGRADE= # northd-backoff-interval, in ms OVN_NORTHD_BACKOFF_INTERVAL= +OVN_OBSERV_ENABLE="false" # Parse parameters given as arguments to this script. while [ "$1" != "" ]; do @@ -267,6 +271,9 @@ while [ "$1" != "" ]; do --network-segmentation-enable) OVN_NETWORK_SEGMENTATION_ENABLE=$VALUE ;; + --route-advertisements-enable) + OVN_ROUTE_ADVERTISEMENTS_ENABLE=$VALUE + ;; --egress-service-enable) OVN_EGRESSSERVICE_ENABLE=$VALUE ;; @@ -354,6 +361,15 @@ while [ "$1" != "" ]; do --enable-dnsnameresolver) OVN_ENABLE_DNSNAMERESOLVER=$VALUE ;; + --enable-observ) + OVN_OBSERV_ENABLE=$VALUE + ;; + --no-hostsubnet-label) + OVN_NOHOSTSUBNET_LABEL=$VALUE + ;; + --ovn_disable_requestedchassis) + OVN_DISABLE_REQUESTEDCHASSIS=$VALUE + ;; *) echo "WARNING: unknown parameter \"$PARAM\"" exit 1 @@ -444,6 +460,8 @@ ovn_multi_network_enable=${OVN_MULTI_NETWORK_ENABLE} echo "ovn_multi_network_enable: ${ovn_multi_network_enable}" ovn_network_segmentation_enable=${OVN_NETWORK_SEGMENTATION_ENABLE} echo "ovn_network_segmentation_enable: ${ovn_network_segmentation_enable}" +ovn_route_advertisements_enable=${OVN_ROUTE_ADVERTISEMENTS_ENABLE} +echo "ovn_route_advertisements_enable: ${ovn_route_advertisements_enable}" ovn_hybrid_overlay_net_cidr=${OVN_HYBRID_OVERLAY_NET_CIDR} echo "ovn_hybrid_overlay_net_cidr: ${ovn_hybrid_overlay_net_cidr}" ovn_disable_snat_multiple_gws=${OVN_DISABLE_SNAT_MULTIPLE_GWS} @@ -544,6 +562,15 @@ echo "ovn_enable_svc_template_support: ${ovn_enable_svc_template_support}" ovn_enable_dnsnameresolver=${OVN_ENABLE_DNSNAMERESOLVER} echo "ovn_enable_dnsnameresolver: ${ovn_enable_dnsnameresolver}" +ovn_observ_enable=${OVN_OBSERV_ENABLE} +echo "ovn_observ_enable: ${ovn_observ_enable}" + +ovn_nohostsubnet_label=${OVN_NOHOSTSUBNET_LABEL} +echo "ovn_nohostsubnet_label: ${ovn_nohostsubnet_label}" + +ovn_disable_requestedchassis=${OVN_DISABLE_REQUESTEDCHASSIS} +echo "ovn_disable_requestedchassis: ${ovn_disable_requestedchassis}" + ovn_image=${ovnkube_image} \
ovnkube_compact_mode_enable=${ovnkube_compact_mode_enable} \ ovn_image_pull_policy=${image_pull_policy} \ @@ -572,6 +599,7 @@ ovn_image=${ovnkube_image} \ ovn_egress_ip_healthcheck_port=${ovn_egress_ip_healthcheck_port} \ ovn_multi_network_enable=${ovn_multi_network_enable} \ ovn_network_segmentation_enable=${ovn_network_segmentation_enable} \ + ovn_route_advertisements_enable=${ovn_route_advertisements_enable} \ ovn_egress_service_enable=${ovn_egress_service_enable} \ ovn_ssl_en=${ovn_ssl_en} \ ovn_remote_probe_interval=${ovn_remote_probe_interval} \ @@ -592,6 +620,7 @@ ovn_image=${ovnkube_image} \ ovn_enable_interconnect=${ovn_enable_interconnect} \ ovn_enable_multi_external_gateway=${ovn_enable_multi_external_gateway} \ ovn_enable_ovnkube_identity=${ovn_enable_ovnkube_identity} \ + ovn_observ_enable=${ovn_observ_enable} \ ovnkube_app_name=ovnkube-node \ jinjanate ../templates/ovnkube-node.yaml.j2 -o ${output_dir}/ovnkube-node.yaml @@ -623,6 +652,7 @@ ovn_image=${ovnkube_image} \ ovn_egress_ip_healthcheck_port=${ovn_egress_ip_healthcheck_port} \ ovn_multi_network_enable=${ovn_multi_network_enable} \ ovn_network_segmentation_enable=${ovn_network_segmentation_enable} \ + ovn_route_advertisements_enable=${ovn_route_advertisements_enable} \ ovn_egress_service_enable=${ovn_egress_service_enable} \ ovn_ssl_en=${ovn_ssl_en} \ ovn_remote_probe_interval=${ovn_remote_probe_interval} \ @@ -643,6 +673,7 @@ ovn_image=${ovnkube_image} \ ovn_enable_interconnect=${ovn_enable_interconnect} \ ovn_enable_multi_external_gateway=${ovn_enable_multi_external_gateway} \ ovn_enable_ovnkube_identity=${ovn_enable_ovnkube_identity} \ + ovn_observ_enable=${ovn_observ_enable} \ ovnkube_app_name=ovnkube-node-dpu \ jinjanate ../templates/ovnkube-node.yaml.j2 -o ${output_dir}/ovnkube-node-dpu.yaml @@ -718,6 +749,7 @@ ovn_image=${ovnkube_image} \ ovn_egress_qos_enable=${ovn_egress_qos_enable} \ ovn_multi_network_enable=${ovn_multi_network_enable} \ ovn_network_segmentation_enable=${ovn_network_segmentation_enable} \ + ovn_route_advertisements_enable=${ovn_route_advertisements_enable} \ ovn_egress_service_enable=${ovn_egress_service_enable} \ ovn_ssl_en=${ovn_ssl_en} \ ovn_master_count=${ovn_master_count} \ @@ -733,6 +765,9 @@ ovn_image=${ovnkube_image} \ ovn_enable_persistent_ips=${ovn_enable_persistent_ips} \ ovn_enable_svc_template_support=${ovn_enable_svc_template_support} \ ovn_enable_dnsnameresolver=${ovn_enable_dnsnameresolver} \ + ovn_observ_enable=${ovn_observ_enable} \ + ovn_nohostsubnet_label=${ovn_nohostsubnet_label} \ + ovn_disable_requestedchassis=${ovn_disable_requestedchassis} \ jinjanate ../templates/ovnkube-master.yaml.j2 -o ${output_dir}/ovnkube-master.yaml ovn_image=${ovnkube_image} \ @@ -762,6 +797,7 @@ ovn_image=${ovnkube_image} \ ovn_egress_qos_enable=${ovn_egress_qos_enable} \ ovn_multi_network_enable=${ovn_multi_network_enable} \ ovn_network_segmentation_enable=${ovn_network_segmentation_enable} \ + ovn_route_advertisements_enable=${ovn_route_advertisements_enable} \ ovn_egress_service_enable=${ovn_egress_service_enable} \ ovn_ssl_en=${ovn_ssl_en} \ ovn_master_count=${ovn_master_count} \ @@ -774,6 +810,7 @@ ovn_image=${ovnkube_image} \ ovn_v6_transit_switch_subnet=${ovn_v6_transit_switch_subnet} \ ovn_enable_persistent_ips=${ovn_enable_persistent_ips} \ ovn_enable_dnsnameresolver=${ovn_enable_dnsnameresolver} \ + ovn_observ_enable=${ovn_observ_enable} \ jinjanate ../templates/ovnkube-control-plane.yaml.j2 -o ${output_dir}/ovnkube-control-plane.yaml ovn_image=${image} \ @@ -839,6 +876,7 @@ 
ovn_image=${ovnkube_image} \ ovn_egress_qos_enable=${ovn_egress_qos_enable} \ ovn_multi_network_enable=${ovn_multi_network_enable} \ ovn_network_segmentation_enable=${ovn_network_segmentation_enable} \ + ovn_route_advertisements_enable=${ovn_route_advertisements_enable} \ ovn_egress_service_enable=${ovn_egress_service_enable} \ ovn_ssl_en=${ovn_ssl_en} \ ovn_remote_probe_interval=${ovn_remote_probe_interval} \ @@ -869,6 +907,7 @@ ovn_image=${ovnkube_image} \ ovn_enable_persistent_ips=${ovn_enable_persistent_ips} \ ovn_enable_svc_template_support=${ovn_enable_svc_template_support} \ ovn_enable_dnsnameresolver=${ovn_enable_dnsnameresolver} \ + ovn_observ_enable=${ovn_observ_enable} \ jinjanate ../templates/ovnkube-single-node-zone.yaml.j2 -o ${output_dir}/ovnkube-single-node-zone.yaml ovn_image=${ovnkube_image} \ @@ -903,6 +942,7 @@ ovn_image=${ovnkube_image} \ ovn_egress_qos_enable=${ovn_egress_qos_enable} \ ovn_multi_network_enable=${ovn_multi_network_enable} \ ovn_network_segmentation_enable=${ovn_network_segmentation_enable} \ + ovn_route_advertisements_enable=${ovn_route_advertisements_enable} \ ovn_ssl_en=${ovn_ssl_en} \ ovn_remote_probe_interval=${ovn_remote_probe_interval} \ ovn_monitor_all=${ovn_monitor_all} \ @@ -932,6 +972,7 @@ ovn_image=${ovnkube_image} \ ovn_enable_persistent_ips=${ovn_enable_persistent_ips} \ ovn_enable_svc_template_support=${ovn_enable_svc_template_support} \ ovn_enable_dnsnameresolver=${ovn_enable_dnsnameresolver} \ + ovn_observ_enable=${ovn_observ_enable} \ jinjanate ../templates/ovnkube-zone-controller.yaml.j2 -o ${output_dir}/ovnkube-zone-controller.yaml ovn_image=${image} \ @@ -1011,5 +1052,7 @@ cp ../templates/k8s.ovn.org_egressqoses.yaml.j2 ${output_dir}/k8s.ovn.org_egress cp ../templates/k8s.ovn.org_egressservices.yaml.j2 ${output_dir}/k8s.ovn.org_egressservices.yaml cp ../templates/k8s.ovn.org_adminpolicybasedexternalroutes.yaml.j2 ${output_dir}/k8s.ovn.org_adminpolicybasedexternalroutes.yaml cp ../templates/k8s.ovn.org_userdefinednetworks.yaml.j2 ${output_dir}/k8s.ovn.org_userdefinednetworks.yaml +cp ../templates/k8s.ovn.org_clusteruserdefinednetworks.yaml.j2 ${output_dir}/k8s.ovn.org_clusteruserdefinednetworks.yaml +cp ../templates/k8s.ovn.org_routeadvertisements.yaml.j2 ${output_dir}/k8s.ovn.org_routeadvertisements.yaml exit 0 diff --git a/dist/images/ovnkube.sh b/dist/images/ovnkube.sh index 265face90f..8ba6205695 100755 --- a/dist/images/ovnkube.sh +++ b/dist/images/ovnkube.sh @@ -96,6 +96,7 @@ fi # OVN_NORTHD_BACKOFF_INTERVAL - ovn northd backoff interval in ms (default 300) # OVN_ENABLE_SVC_TEMPLATE_SUPPORT - enable svc template support # OVN_ENABLE_DNSNAMERESOLVER - enable dns name resolver support +# OVN_OBSERV_ENABLE - enable observability for ovnkube # The argument to the command is the operation to be performed # ovn-master ovn-controller ovn-node display display_env ovn_debug @@ -268,6 +269,8 @@ ovn_disable_ovn_iface_id_ver=${OVN_DISABLE_OVN_IFACE_ID_VER:-false} ovn_multi_network_enable=${OVN_MULTI_NETWORK_ENABLE:-false} #OVN_NETWORK_SEGMENTATION_ENABLE - enable user defined primary networks for ovn-kubernetes ovn_network_segmentation_enable=${OVN_NETWORK_SEGMENTATION_ENABLE:=false} +#OVN_ROUTE_ADVERTISEMENTS_ENABLE - enable route advertisements for ovn-kubernetes +ovn_route_advertisements_enable=${OVN_ROUTE_ADVERTISEMENTS_ENABLE:=false} ovn_acl_logging_rate_limit=${OVN_ACL_LOGGING_RATE_LIMIT:-"20"} ovn_netflow_targets=${OVN_NETFLOW_TARGETS:-} ovn_sflow_targets=${OVN_SFLOW_TARGETS:-} @@ -310,6 +313,13 @@
ovn_northd_backoff_interval=${OVN_NORTHD_BACKOFF_INTERVAL:-"300"} ovn_enable_svc_template_support=${OVN_ENABLE_SVC_TEMPLATE_SUPPORT:-true} # OVN_ENABLE_DNSNAMERESOLVER - enable dns name resolver support ovn_enable_dnsnameresolver=${OVN_ENABLE_DNSNAMERESOLVER:-false} +# OVN_OBSERV_ENABLE - enable observability for ovnkube +ovn_observ_enable=${OVN_OBSERV_ENABLE:-false} +# OVN_NOHOSTSUBNET_LABEL - node label indicating nodes managing their own network +ovn_nohostsubnet_label=${OVN_NOHOSTSUBNET_LABEL:-""} +# OVN_DISABLE_REQUESTEDCHASSIS - disable requested-chassis option during pod creation +# should be set to true when dpu nodes are in the cluster +ovn_disable_requestedchassis=${OVN_DISABLE_REQUESTEDCHASSIS:-false} # Determine the ovn rundir. if [[ -f /usr/bin/ovn-appctl ]]; then @@ -1213,6 +1223,12 @@ ovn-master() { fi echo "network_segmentation_enabled_flag=${network_segmentation_enabled_flag}" + route_advertisements_enabled_flag= + if [[ ${ovn_route_advertisements_enable} == "true" ]]; then + route_advertisements_enabled_flag="--enable-route-advertisements" + fi + echo "route_advertisements_enabled_flag=${route_advertisements_enabled_flag}" + egressservice_enabled_flag= if [[ ${ovn_egressservice_enable} == "true" ]]; then egressservice_enabled_flag="--enable-egress-service" @@ -1259,6 +1275,23 @@ ovn-master() { fi echo "ovn_enable_svc_template_support_flag=${ovn_enable_svc_template_support_flag}" + ovn_observ_enable_flag= + if [[ ${ovn_observ_enable} == "true" ]]; then + ovn_observ_enable_flag="--enable-observability" + fi + echo "ovn_observ_enable_flag=${ovn_observ_enable_flag}" + + nohostsubnet_label_option= + if [[ ${ovn_nohostsubnet_label} != "" ]]; then + nohostsubnet_label_option="--no-hostsubnet-nodes=${ovn_nohostsubnet_label}" + fi + + ovn_disable_requestedchassis_flag= + if [[ ${ovn_disable_requestedchassis} == "true" ]]; then + ovn_disable_requestedchassis_flag="--disable-requestedchassis" + fi + echo "ovn_disable_requestedchassis_flag=${ovn_disable_requestedchassis_flag}" + init_node_flags= if [[ ${ovnkube_compact_mode_enable} == "true" ]]; then init_node_flags="--init-node ${K8S_NODE} --nodeport" @@ -1296,8 +1329,10 @@ ovn-master() { ${multicast_enabled_flag} \ ${multi_network_enabled_flag} \ ${network_segmentation_enabled_flag} \ + ${route_advertisements_enabled_flag} \ ${ovn_acl_logging_rate_limit_flag} \ ${ovn_enable_svc_template_support_flag} \ + ${ovn_observ_enable_flag} \ ${ovnkube_config_duration_enable_flag} \ ${ovnkube_enable_multi_external_gateway_flag} \ ${ovnkube_metrics_scale_enable_flag} \ @@ -1310,6 +1345,8 @@ ovn-master() { ${ovn_v6_masquerade_subnet_opt} \ ${persistent_ips_enabled_flag} \ ${ovn_enable_dnsnameresolver_flag} \ + ${nohostsubnet_label_option} \ + ${ovn_disable_requestedchassis_flag} \ --cluster-subnets ${net_cidr} --k8s-service-cidr=${svc_cidr} \ --gateway-mode=${ovn_gateway_mode} ${ovn_gateway_opts} \ --host-network-namespace ${ovn_host_network_namespace} \ @@ -1481,6 +1518,12 @@ ovnkube-controller() { fi echo "network_segmentation_enabled_flag=${network_segmentation_enabled_flag}" + route_advertisements_enabled_flag= + if [[ ${ovn_route_advertisements_enable} == "true" ]]; then + route_advertisements_enabled_flag="--enable-route-advertisements" + fi + echo "route_advertisements_enabled_flag=${route_advertisements_enabled_flag}" + egressservice_enabled_flag= if [[ ${ovn_egressservice_enable} == "true" ]]; then egressservice_enabled_flag="--enable-egress-service" @@ -1561,6 +1604,12 @@ ovnkube-controller() { fi echo 
"ovn_enable_dnsnameresolver_flag=${ovn_enable_dnsnameresolver_flag}" + ovn_observ_enable_flag= + if [[ ${ovn_observ_enable} == "true" ]]; then + ovn_observ_enable_flag="--enable-observability" + fi + echo "ovn_observ_enable_flag=${ovn_observ_enable_flag}" + echo "=============== ovnkube-controller ========== MASTER ONLY" /usr/bin/ovnkube --init-ovnkube-controller ${K8S_NODE} \ ${anp_enabled_flag} \ @@ -1576,9 +1625,11 @@ ovnkube-controller() { ${multicast_enabled_flag} \ ${multi_network_enabled_flag} \ ${network_segmentation_enabled_flag} \ + ${route_advertisements_enabled_flag} \ ${ovn_acl_logging_rate_limit_flag} \ ${ovn_dbs} \ ${ovn_enable_svc_template_support_flag} \ + ${ovn_observ_enable_flag} \ ${ovnkube_config_duration_enable_flag} \ ${ovnkube_enable_interconnect_flag} \ ${ovnkube_local_cert_flags} \ @@ -1757,6 +1808,12 @@ ovnkube-controller-with-node() { fi echo "network_segmentation_enabled_flag=${network_segmentation_enabled_flag}" + route_advertisements_enabled_flag= + if [[ ${ovn_route_advertisements_enable} == "true" ]]; then + route_advertisements_enabled_flag="--enable-route-advertisements" + fi + echo "route_advertisements_enabled_flag=${route_advertisements_enabled_flag}" + egressservice_enabled_flag= if [[ ${ovn_egressservice_enable} == "true" ]]; then egressservice_enabled_flag="--enable-egress-service" @@ -1960,6 +2017,12 @@ ovnkube-controller-with-node() { fi echo "ovn_enable_dnsnameresolver_flag=${ovn_enable_dnsnameresolver_flag}" + ovn_observ_enable_flag= + if [[ ${ovn_observ_enable} == "true" ]]; then + ovn_observ_enable_flag="--enable-observability" + fi + echo "ovn_observ_enable_flag=${ovn_observ_enable_flag}" + echo "=============== ovnkube-controller-with-node --init-ovnkube-controller-with-node==========" /usr/bin/ovnkube --init-ovnkube-controller ${K8S_NODE} --init-node ${K8S_NODE} \ ${anp_enabled_flag} \ @@ -1985,11 +2048,13 @@ ovnkube-controller-with-node() { ${multicast_enabled_flag} \ ${multi_network_enabled_flag} \ ${network_segmentation_enabled_flag} \ + ${route_advertisements_enabled_flag} \ ${netflow_targets} \ ${ofctrl_wait_before_clear} \ ${ovn_acl_logging_rate_limit_flag} \ ${ovn_dbs} \ ${ovn_enable_svc_template_support_flag} \ + ${ovn_observ_enable_flag} \ ${ovn_encap_ip_flag} \ ${ovn_encap_port_flag} \ ${ovnkube_config_duration_enable_flag} \ @@ -2026,6 +2091,7 @@ ovnkube-controller-with-node() { --nodeport \ --ovn-metrics-bind-address ${ovn_metrics_bind_address} \ --pidfile ${OVN_RUNDIR}/ovnkube-controller-with-node.pid \ + --disable-udn-host-isolation \ --zone ${ovn_zone} & wait_for_event attempts=3 process_ready ovnkube-controller-with-node @@ -2148,6 +2214,12 @@ ovn-cluster-manager() { fi echo "network_segmentation_enabled_flag=${network_segmentation_enabled_flag}" + route_advertisements_enabled_flag= + if [[ ${ovn_route_advertisements_enable} == "true" ]]; then + route_advertisements_enabled_flag="--enable-route-advertisements" + fi + echo "route_advertisements_enabled_flag=${route_advertisements_enabled_flag}" + persistent_ips_enabled_flag= if [[ ${ovn_enable_persistent_ips} == "true" ]]; then persistent_ips_enabled_flag="--enable-persistent-ips" @@ -2203,6 +2275,7 @@ ovn-cluster-manager() { ${multicast_enabled_flag} \ ${multi_network_enabled_flag} \ ${network_segmentation_enabled_flag} \ + ${route_advertisements_enabled_flag} \ ${persistent_ips_enabled_flag} \ ${ovnkube_enable_interconnect_flag} \ ${ovnkube_enable_multi_external_gateway_flag} \ @@ -2370,6 +2443,11 @@ ovn-node() { network_segmentation_enabled_flag="--enable-multi-network 
--enable-network-segmentation" fi + route_advertisements_enabled_flag= + if [[ ${ovn_route_advertisements_enable} == "true" ]]; then + route_advertisements_enabled_flag="--enable-route-advertisements" + fi + netflow_targets= if [[ -n ${ovn_netflow_targets} ]]; then netflow_targets="--netflow-targets ${ovn_netflow_targets}" @@ -2459,6 +2537,39 @@ ovn-node() { node_mgmt_port_netdev_flags="$node_mgmt_port_netdev_flags --ovnkube-node-mgmt-port-dp-resource-name ${ovnkube_node_mgmt_port_dp_resource_name}" fi + if [[ ${ovnkube_node_mode} == "dpu" ]]; then + # in the case of dpu mode we want the host K8s Node Name and not the DPU K8s Node Name + K8S_NODE=$(ovs-vsctl --if-exists get Open_vSwitch . external_ids:host-k8s-nodename | tr -d '\"') + if [[ ${K8S_NODE} == "" ]]; then + echo "Couldn't get the required Host K8s Nodename. Exiting..." + exit 1 + fi + if [[ ${ovn_gateway_opts} == "" ]]; then + # get the gateway interface + gw_iface=$(ovs-vsctl --if-exists get Open_vSwitch . external_ids:ovn-gw-interface | tr -d \") + if [[ ${gw_iface} == "" ]]; then + echo "Couldn't get the required OVN Gateway Interface. Exiting..." + exit 1 + fi + ovn_gateway_opts="--gateway-interface=${gw_iface} " + + # get the gateway nexthop + gw_nexthop=$(ovs-vsctl --if-exists get Open_vSwitch . external_ids:ovn-gw-nexthop | tr -d \") + if [[ ${gw_nexthop} == "" ]]; then + echo "Couldn't get the required OVN Gateway NextHop. Exiting..." + exit 1 + fi + ovn_gateway_opts+="--gateway-nexthop=${gw_nexthop} " + fi + + # this is required if the DPU and DPU Host are in different subnets + if [[ ${ovn_gateway_router_subnet} == "" ]]; then + # get the gateway router subnet + ovn_gateway_router_subnet=$(ovs-vsctl --if-exists get Open_vSwitch . external_ids:ovn-gw-router-subnet | tr -d \") + fi + + fi + local ovn_node_ssl_opts="" if [[ ${ovnkube_node_mode} != "dpu-host" ]]; then [[ "yes" == ${OVN_SSL_ENABLE} ]] && { @@ -2559,6 +2670,7 @@ ovn-node() { ${multicast_enabled_flag} \ ${multi_network_enabled_flag} \ ${network_segmentation_enabled_flag} \ + ${route_advertisements_enabled_flag} \ ${netflow_targets} \ ${ofctrl_wait_before_clear} \ ${ovn_dbs} \ @@ -2594,6 +2706,7 @@ ovn-node() { --nodeport \ --ovn-metrics-bind-address ${ovn_metrics_bind_address} \ --pidfile ${OVN_RUNDIR}/ovnkube.pid \ + --disable-udn-host-isolation \ --zone ${ovn_zone} & wait_for_event attempts=3 process_ready ovnkube diff --git a/dist/templates/k8s.ovn.org_adminpolicybasedexternalroutes.yaml.j2 b/dist/templates/k8s.ovn.org_adminpolicybasedexternalroutes.yaml.j2 index 53392b4082..98b9071d50 100644 --- a/dist/templates/k8s.ovn.org_adminpolicybasedexternalroutes.yaml.j2 +++ b/dist/templates/k8s.ovn.org_adminpolicybasedexternalroutes.yaml.j2 @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.4 name: adminpolicybasedexternalroutes.k8s.ovn.org spec: group: k8s.ovn.org @@ -85,11 +85,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -154,11 +156,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -205,11 +209,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key 
- operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string diff --git a/dist/templates/k8s.ovn.org_clusteruserdefinednetworks.yaml.j2 b/dist/templates/k8s.ovn.org_clusteruserdefinednetworks.yaml.j2 new file mode 100644 index 0000000000..cad81a2c28 --- /dev/null +++ b/dist/templates/k8s.ovn.org_clusteruserdefinednetworks.yaml.j2 @@ -0,0 +1,330 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.4 + name: clusteruserdefinednetworks.k8s.ovn.org +spec: + group: k8s.ovn.org + names: + kind: ClusterUserDefinedNetwork + listKind: ClusterUserDefinedNetworkList + plural: clusteruserdefinednetworks + singular: clusteruserdefinednetwork + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterUserDefinedNetwork describe network request for a shared + network across namespaces. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterUserDefinedNetworkSpec defines the desired state of + ClusterUserDefinedNetwork. + properties: + namespaceSelector: + description: NamespaceSelector Label selector for which namespace + network should be available for. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + network: + description: Network is the user-defined-network spec + properties: + layer2: + description: Layer2 is the Layer2 topology configuration. 
+ properties: + ipamLifecycle: + description: |- + IPAMLifecycle controls IP addresses management lifecycle. + + The only allowed value is Persistent. When set, OVN Kubernetes assigned IP addresses will be persisted in an + `ipamclaims.k8s.cni.cncf.io` object. These IP addresses will be reused by other pods if requested. + Only supported when "subnets" are set. + enum: + - Persistent + type: string + joinSubnets: + description: |- + JoinSubnets are used inside the OVN network topology. + + Dual-stack clusters may set 2 subnets (one for each IP family), otherwise only 1 subnet is allowed. + This field is only allowed for "Primary" network. + It is not recommended to set this field without explicit need and understanding of the OVN network topology. + When omitted, the platform will choose a reasonable default which is subject to change over time. + items: + type: string + maxItems: 2 + minItems: 1 + type: array + mtu: + description: |- + MTU is the maximum transmission unit for a network. + MTU is optional, if not provided, the globally configured value in OVN-Kubernetes (defaults to 1400) is used for the network. + format: int32 + maximum: 65536 + minimum: 576 + type: integer + role: + description: |- + Role describes the network role in the pod. + + Allowed value is "Secondary". + Secondary network is only assigned to pods that use `k8s.v1.cni.cncf.io/networks` annotation to select given network. + enum: + - Primary + - Secondary + type: string + subnets: + description: |- + Subnets are used for the pod network across the cluster. + Dual-stack clusters may set 2 subnets (one for each IP family), otherwise only 1 subnet is allowed. + + The format should match standard CIDR notation (for example, "10.128.0.0/16"). + This field may be omitted. In that case the logical switch implementing the network only provides layer 2 communication, + and users must configure IP addresses for the pods. As a consequence, Port security only prevents MAC spoofing. + items: + type: string + maxItems: 2 + minItems: 1 + type: array + required: + - role + type: object + x-kubernetes-validations: + - message: Subnets is required for Primary Layer2 topology + rule: self.role != 'Primary' || has(self.subnets) && size(self.subnets) + > 0 + - message: JoinSubnets is only supported for Primary network + rule: '!has(self.joinSubnets) || has(self.role) && self.role + == ''Primary''' + - message: IPAMLifecycle is only supported when subnets are set + rule: '!has(self.ipamLifecycle) || has(self.subnets) && size(self.subnets) + > 0' + layer3: + description: Layer3 is the Layer3 topology configuration. + properties: + joinSubnets: + description: |- + JoinSubnets are used inside the OVN network topology. + + Dual-stack clusters may set 2 subnets (one for each IP family), otherwise only 1 subnet is allowed. + This field is only allowed for "Primary" network. + It is not recommended to set this field without explicit need and understanding of the OVN network topology. + When omitted, the platform will choose a reasonable default which is subject to change over time. + items: + type: string + maxItems: 2 + minItems: 1 + type: array + mtu: + description: |- + MTU is the maximum transmission unit for a network. + + MTU is optional, if not provided, the globally configured value in OVN-Kubernetes (defaults to 1400) is used for the network. + format: int32 + maximum: 65536 + minimum: 576 + type: integer + role: + description: |- + Role describes the network role in the pod. + + Allowed values are "Primary" and "Secondary". 
+ Primary network is automatically assigned to every pod created in the same namespace. + Secondary network is only assigned to pods that use `k8s.v1.cni.cncf.io/networks` annotation to select given network. + enum: + - Primary + - Secondary + type: string + subnets: + description: |- + Subnets are used for the pod network across the cluster. + + Dual-stack clusters may set 2 subnets (one for each IP family), otherwise only 1 subnet is allowed. + Given subnet is split into smaller subnets for every node. + items: + properties: + cidr: + description: CIDR specifies L3Subnet, which is split + into smaller subnets for every node. + type: string + hostSubnet: + description: |- + HostSubnet specifies the subnet size for every node. + + When not set, it will be assigned automatically. + format: int32 + maximum: 127 + minimum: 1 + type: integer + required: + - cidr + type: object + maxItems: 2 + minItems: 1 + type: array + required: + - role + - subnets + type: object + x-kubernetes-validations: + - message: Subnets is required for Layer3 topology + rule: has(self.subnets) && size(self.subnets) > 0 + - message: JoinSubnets is only supported for Primary network + rule: '!has(self.joinSubnets) || has(self.role) && self.role + == ''Primary''' + topology: + description: |- + Topology describes network configuration. + + Allowed values are "Layer3", "Layer2". + Layer3 topology creates a layer 2 segment per node, each with a different subnet. Layer 3 routing is used to interconnect node subnets. + Layer2 topology creates one logical switch shared by all nodes. + enum: + - Layer2 + - Layer3 + type: string + required: + - topology + type: object + x-kubernetes-validations: + - message: Network spec is immutable + rule: self == oldSelf + required: + - namespaceSelector + - network + type: object + status: + description: ClusterUserDefinedNetworkStatus contains the observed status + of the ClusterUserDefinedNetwork. + properties: + conditions: + description: Conditions slice of condition objects indicating details + about ClusterUserDefineNetwork status. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. 
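For orientation, a minimal manifest against the ClusterUserDefinedNetwork schema introduced above might look like the following sketch; the object name, selector label, and subnet are hypothetical, not values taken from this change.

```yaml
# Hypothetical sketch only: exercises the ClusterUserDefinedNetwork schema
# above. Object name, namespace label, and subnet are illustrative.
apiVersion: k8s.ovn.org/v1
kind: ClusterUserDefinedNetwork
metadata:
  name: shared-net                # hypothetical name
spec:
  namespaceSelector:              # namespaces the network is available in
    matchLabels:
      network: shared             # hypothetical label
  network:
    topology: Layer2
    layer2:
      role: Secondary
      subnets:
        - "192.168.100.0/24"      # dual-stack clusters may list two subnets
```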
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/dist/templates/k8s.ovn.org_egressfirewalls.yaml.j2 b/dist/templates/k8s.ovn.org_egressfirewalls.yaml.j2 index d6566dcc86..c7f7e73cc4 100644 --- a/dist/templates/k8s.ovn.org_egressfirewalls.yaml.j2 +++ b/dist/templates/k8s.ovn.org_egressfirewalls.yaml.j2 @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.4 name: egressfirewalls.k8s.ovn.org spec: group: k8s.ovn.org @@ -93,13 +93,11 @@ spec: be unset. type: string dnsName: - description: dnsName is the domain name to allow/deny traffic - to. If this is set, cidrSelector and nodeSelector must - be unset. For a wildcard DNS name, the '*' will match - only one label. Additionally, only a single '*' can be - used at the beginning of the wildcard DNS name. For example, - '*.example.com' will match 'sub1.example.com' but won't - match 'sub2.sub1.example.com'. + description: |- + dnsName is the domain name to allow/deny traffic to. If this is set, cidrSelector and nodeSelector must be unset. + For a wildcard DNS name, the '*' will match only one label. Additionally, only a single '*' can be + used at the beginning of the wildcard DNS name. For example, '*.example.com' will match 'sub1.example.com' + but won't match 'sub2.sub1.example.com'. 
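The wildcard semantics spelled out above are easiest to see in a concrete rule; the sketch below is hypothetical (namespace and rule set invented) and simply pairs a wildcard dnsName allow with a catch-all deny.

```yaml
# Hypothetical sketch only: '*.example.com' matches 'sub1.example.com'
# but not 'sub2.sub1.example.com', per the wildcard rule described above.
apiVersion: k8s.ovn.org/v1
kind: EgressFirewall
metadata:
  name: default                   # EgressFirewall objects are named "default"
  namespace: demo                 # hypothetical namespace
spec:
  egress:
    - type: Allow
      to:
        dnsName: "*.example.com"  # single leading '*', matching one label
    - type: Deny
      to:
        cidrSelector: 0.0.0.0/0
```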
pattern: ^(\*\.)?([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ type: string nodeSelector: @@ -133,11 +131,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string diff --git a/dist/templates/k8s.ovn.org_egressips.yaml.j2 b/dist/templates/k8s.ovn.org_egressips.yaml.j2 index 0e19e28173..bca5f41a69 100644 --- a/dist/templates/k8s.ovn.org_egressips.yaml.j2 +++ b/dist/templates/k8s.ovn.org_egressips.yaml.j2 @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.4 name: egressips.k8s.ovn.org spec: group: k8s.ovn.org @@ -92,11 +92,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -143,11 +145,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string diff --git a/dist/templates/k8s.ovn.org_egressqoses.yaml.j2 b/dist/templates/k8s.ovn.org_egressqoses.yaml.j2 index 415d70313c..7440d08690 100644 --- a/dist/templates/k8s.ovn.org_egressqoses.yaml.j2 +++ b/dist/templates/k8s.ovn.org_egressqoses.yaml.j2 @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.4 name: egressqoses.k8s.ovn.org spec: group: k8s.ovn.org @@ -101,11 +101,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -130,16 +132,8 @@ spec: description: An array of condition objects indicating details about status of EgressQoS object. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -180,12 +174,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/dist/templates/k8s.ovn.org_egressservices.yaml.j2 b/dist/templates/k8s.ovn.org_egressservices.yaml.j2 index 683611fb29..45c16b1de1 100644 --- a/dist/templates/k8s.ovn.org_egressservices.yaml.j2 +++ b/dist/templates/k8s.ovn.org_egressservices.yaml.j2 @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.4 name: egressservices.k8s.ovn.org spec: group: k8s.ovn.org @@ -84,11 +84,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string diff --git a/dist/templates/k8s.ovn.org_routeadvertisements.yaml.j2 b/dist/templates/k8s.ovn.org_routeadvertisements.yaml.j2 new file mode 100644 index 0000000000..49a6cb8657 --- /dev/null +++ b/dist/templates/k8s.ovn.org_routeadvertisements.yaml.j2 @@ -0,0 +1,293 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.4 + name: routeadvertisements.k8s.ovn.org +spec: + group: k8s.ovn.org + names: + kind: RouteAdvertisements + listKind: RouteAdvertisementsList + plural: routeadvertisements + shortNames: + - ra + singular: routeadvertisements + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.status + name: Status + type: string + name: v1 + schema: + openAPIV3Schema: + description: RouteAdvertisements is the Schema for the routeadvertisements + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RouteAdvertisementsSpec defines the desired state of RouteAdvertisements + properties: + advertisements: + description: advertisements determines what is advertised. + items: + description: AdvertisementType determines the type of advertisement. + type: string + maxItems: 2 + minItems: 1 + type: array + x-kubernetes-validations: + - rule: self.all(x, self.exists_one(y, x == y)) + frrConfigurationSelector: + description: |- + frrConfigurationSelector determines which FRRConfigurations will the + OVN-Kubernetes driven FRRConfigurations be based on. + When omitted, all FRRConfigurations will be considered. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + networkSelector: + description: |- + networkSelector determines which network routes should be advertised. To + select the default network, match on label 'k8s.ovn.org/default-network'. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + nodeSelector: + description: |- + nodeSelector limits the advertisements to selected nodes. + When omitted, all nodes are selected. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + targetVRF: + description: targetVRF determines which VRF the routes should be advertised + in. + type: string + required: + - advertisements + type: object + x-kubernetes-validations: + - message: If 'PodNetwork' is selected for advertisement, a 'nodeSelector' + can't be specified as it needs to be advertised on all nodes + rule: '!has(self.nodeSelector) || !(''PodNetwork'' in self.advertisements)' + status: + description: |- + RouteAdvertisementsStatus defines the observed state of RouteAdvertisements. + It should always be reconstructable from the state of the cluster and/or + outside world. + properties: + conditions: + description: |- + conditions is an array of condition objects indicating details about + status of RouteAdvertisements object. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + status: + description: |- + status is a concise indication of whether the RouteAdvertisements + resource is applied with success. 
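Putting the spec fields above together, a minimal RouteAdvertisements object could look like this sketch; the name is hypothetical, while the 'PodNetwork' advertisement type and the 'k8s.ovn.org/default-network' label come straight from the schema and its validation messages.

```yaml
# Hypothetical sketch only: advertises the default pod network. Note the
# CEL rule above forbids combining 'PodNetwork' with a nodeSelector.
apiVersion: k8s.ovn.org/v1
kind: RouteAdvertisements
metadata:
  name: default-podnetwork            # hypothetical name
spec:
  advertisements:
    - PodNetwork
  networkSelector:
    matchLabels:
      k8s.ovn.org/default-network: "" # selects the default network
```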
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/dist/templates/k8s.ovn.org_userdefinednetworks.yaml.j2 b/dist/templates/k8s.ovn.org_userdefinednetworks.yaml.j2 index cdacc9ccee..49856ca206 100644 --- a/dist/templates/k8s.ovn.org_userdefinednetworks.yaml.j2 +++ b/dist/templates/k8s.ovn.org_userdefinednetworks.yaml.j2 @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.4 name: userdefinednetworks.k8s.ovn.org spec: group: k8s.ovn.org @@ -46,7 +46,6 @@ spec: description: |- IPAMLifecycle controls IP addresses management lifecycle. - The only allowed value is Persistent. When set, OVN Kubernetes assigned IP addresses will be persisted in an `ipamclaims.k8s.cni.cncf.io` object. These IP addresses will be reused by other pods if requested. Only supported when "subnets" are set. @@ -57,7 +56,6 @@ spec: description: |- JoinSubnets are used inside the OVN network topology. - Dual-stack clusters may set 2 subnets (one for each IP family), otherwise only 1 subnet is allowed. This field is only allowed for "Primary" network. It is not recommended to set this field without explicit need and understanding of the OVN network topology. @@ -73,13 +71,12 @@ spec: MTU is optional, if not provided, the globally configured value in OVN-Kubernetes (defaults to 1400) is used for the network. format: int32 maximum: 65536 - minimum: 0 + minimum: 576 type: integer role: description: |- Role describes the network role in the pod. - Allowed value is "Secondary". Secondary network is only assigned to pods that use `k8s.v1.cni.cncf.io/networks` annotation to select given network. enum: @@ -91,7 +88,6 @@ spec: Subnets are used for the pod network across the cluster. Dual-stack clusters may set 2 subnets (one for each IP family), otherwise only 1 subnet is allowed. - The format should match standard CIDR notation (for example, "10.128.0.0/16"). This field may be omitted. In that case the logical switch implementing the network only provides layer 2 communication, and users must configure IP addresses for the pods. As a consequence, Port security only prevents MAC spoofing. @@ -120,7 +116,6 @@ spec: description: |- JoinSubnets are used inside the OVN network topology. - Dual-stack clusters may set 2 subnets (one for each IP family), otherwise only 1 subnet is allowed. This field is only allowed for "Primary" network. It is not recommended to set this field without explicit need and understanding of the OVN network topology. @@ -134,17 +129,15 @@ spec: description: |- MTU is the maximum transmission unit for a network. - MTU is optional, if not provided, the globally configured value in OVN-Kubernetes (defaults to 1400) is used for the network. format: int32 maximum: 65536 - minimum: 0 + minimum: 576 type: integer role: description: |- Role describes the network role in the pod. - Allowed values are "Primary" and "Secondary". Primary network is automatically assigned to every pod created in the same namespace. Secondary network is only assigned to pods that use `k8s.v1.cni.cncf.io/networks` annotation to select given network. @@ -156,7 +149,6 @@ spec: description: |- Subnets are used for the pod network across the cluster. - Dual-stack clusters may set 2 subnets (one for each IP family), otherwise only 1 subnet is allowed. Given subnet is split into smaller subnets for every node. 
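As a worked example of the per-node split just described (values hypothetical): with the entry below, each node is carved its own /24 out of the /16 cluster subnet, leaving room for 256 node subnets.

```yaml
# Hypothetical values: a /16 cluster subnet split into one /24 per node.
subnets:
  - cidr: "10.128.0.0/16"
    hostSubnet: 24
```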
items: @@ -169,18 +161,20 @@ spec: description: |- HostSubnet specifies the subnet size for every node. - When not set, it will be assigned automatically. format: int32 maximum: 127 minimum: 1 type: integer + required: + - cidr type: object maxItems: 2 minItems: 1 type: array required: - role + - subnets type: object x-kubernetes-validations: - message: Subnets is required for Layer3 topology @@ -192,7 +186,6 @@ spec: description: |- Topology describes network configuration. - Allowed values are "Layer3", "Layer2". Layer3 topology creates a layer 2 segment per node, each with a different subnet. Layer 3 routing is used to interconnect node subnets. Layer2 topology creates one logical switch shared by all nodes. @@ -220,16 +213,8 @@ spec: properties: conditions: items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -270,12 +255,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -287,6 +267,9 @@ spec: - type type: object type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map type: object required: - spec diff --git a/dist/templates/ovnkube-control-plane.yaml.j2 b/dist/templates/ovnkube-control-plane.yaml.j2 index 0bf28ecf03..af72a364a2 100644 --- a/dist/templates/ovnkube-control-plane.yaml.j2 +++ b/dist/templates/ovnkube-control-plane.yaml.j2 @@ -146,6 +146,8 @@ spec: value: "{{ ovn_multi_network_enable }}" - name: OVN_NETWORK_SEGMENTATION_ENABLE value: "{{ ovn_network_segmentation_enable }}" + - name: OVN_ROUTE_ADVERTISEMENTS_ENABLE + value: "{{ ovn_route_advertisements_enable }}" - name: OVN_HYBRID_OVERLAY_NET_CIDR value: "{{ ovn_hybrid_overlay_net_cidr }}" - name: OVN_DISABLE_SNAT_MULTIPLE_GWS diff --git a/dist/templates/ovnkube-master.yaml.j2 b/dist/templates/ovnkube-master.yaml.j2 index 883bf3f7af..389a539dff 100644 --- a/dist/templates/ovnkube-master.yaml.j2 +++ b/dist/templates/ovnkube-master.yaml.j2 @@ -259,6 +259,8 @@ spec: value: "{{ ovn_multi_network_enable }}" - name: OVN_NETWORK_SEGMENTATION_ENABLE value: "{{ ovn_network_segmentation_enable }}" + - name: OVN_ROUTE_ADVERTISEMENTS_ENABLE + value: "{{ ovn_route_advertisements_enable }}" - name: OVN_EGRESSSERVICE_ENABLE value: "{{ ovn_egress_service_enable }}" - name: OVN_HYBRID_OVERLAY_NET_CIDR @@ -295,6 +297,10 @@ spec: value: "{{ ovn_enable_multi_external_gateway }}" - name: OVN_ENABLE_SVC_TEMPLATE_SUPPORT value: "{{ ovn_enable_svc_template_support }}" + - name: OVN_NOHOSTSUBNET_LABEL + value: "{{ ovn_nohostsubnet_label }}" + - name: OVN_DISABLE_REQUESTEDCHASSIS + value: "{{ ovn_disable_requestedchassis }}" - name: OVN_HOST_NETWORK_NAMESPACE valueFrom: configMapKeyRef: diff --git a/dist/templates/ovnkube-node.yaml.j2 b/dist/templates/ovnkube-node.yaml.j2 index 21c838f6c3..8fea157646 100644 --- a/dist/templates/ovnkube-node.yaml.j2 +++ b/dist/templates/ovnkube-node.yaml.j2 @@ -87,6 +87,10 @@ spec: - mountPath: /var/run/netns name: host-netns mountPropagation: Bidirectional + - mountPath: /run/systemd/private + name: run-systemd + subPath: private + readOnly: true {%- if ovnkube_app_name!="ovnkube-node-dpu-host" %} # ovnkube-node only mounts (non dpu related) - mountPath: /var/run/openvswitch/ @@ -149,7 +153,11 @@ spec: name: ovn-config key: routable_mtu optional: true + {%-if ovnkube_app_name=="ovnkube-node-dpu" %} + - name: K8S_NODE_DPU + {%- else %} - name: K8S_NODE + {%- endif %} valueFrom: fieldRef: fieldPath: spec.nodeName @@ -230,6 +238,8 @@ spec: value: "{{ ovn_multi_network_enable }}" - name: OVN_NETWORK_SEGMENTATION_ENABLE value: "{{ ovn_network_segmentation_enable }}" + - name: OVN_ROUTE_ADVERTISEMENTS_ENABLE + value: "{{ ovn_route_advertisements_enable }}" - name: OVN_ENABLE_INTERCONNECT value: "{{ ovn_enable_interconnect }}" - name: OVN_ENABLE_MULTI_EXTERNAL_GATEWAY @@ -257,13 +267,23 @@ spec: fieldPath: metadata.name readinessProbe: + {%- if ovnkube_app_name!="ovnkube-node-dpu" %} exec: command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovnkube-node"] initialDelaySeconds: 30 timeoutSeconds: 30 periodSeconds: 60 + {%- else %} + httpGet: + path: /metrics + port: 9476 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + periodSeconds: 30 + {%- endif %} {% endif %} - {% if ovnkube_app_name=="ovnkube-node" -%} + {% if ovnkube_app_name!="ovnkube-node-dpu-host" -%} - name: ovn-controller image: "{{ 
ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}" imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}" @@ -306,7 +326,11 @@ spec: configMapKeyRef: name: ovn-config key: k8s_apiserver + {%- if ovnkube_app_name=="ovnkube-node-dpu" %} + - name: K8S_NODE_DPU + {%- else %} - name: K8S_NODE + {%- endif %} valueFrom: fieldRef: fieldPath: spec.nodeName @@ -408,6 +432,9 @@ spec: - name: host-netns hostPath: path: /var/run/netns + - name: run-systemd + hostPath: + path: /run/systemd {%- if ovnkube_app_name!="ovnkube-node-dpu-host" %} # non DPU related volumes - name: host-var-log-ovs diff --git a/dist/templates/ovnkube-single-node-zone.yaml.j2 b/dist/templates/ovnkube-single-node-zone.yaml.j2 index dd82cb4861..3007b7c19c 100644 --- a/dist/templates/ovnkube-single-node-zone.yaml.j2 +++ b/dist/templates/ovnkube-single-node-zone.yaml.j2 @@ -295,6 +295,10 @@ spec: - mountPath: /etc/ovn/ name: host-var-lib-ovs readOnly: true + - mountPath: /run/systemd/private + name: run-systemd + subPath: private + readOnly: true resources: requests: @@ -429,6 +433,8 @@ spec: value: "{{ ovn_multi_network_enable }}" - name: OVN_NETWORK_SEGMENTATION_ENABLE value: "{{ ovn_network_segmentation_enable }}" + - name: OVN_ROUTE_ADVERTISEMENTS_ENABLE + value: "{{ ovn_route_advertisements_enable }}" - name: OVNKUBE_NODE_MGMT_PORT_NETDEV value: "{{ ovnkube_node_mgmt_port_netdev }}" - name: OVN_EMPTY_LB_EVENTS @@ -446,6 +452,8 @@ spec: value: "local" - name: OVN_ENABLE_INTERCONNECT value: "{{ ovn_enable_interconnect }}" + - name: OVN_OBSERV_ENABLE + value: "{{ ovn_observ_enable }}" - name: OVN_ENABLE_MULTI_EXTERNAL_GATEWAY value: "{{ ovn_enable_multi_external_gateway }}" - name: OVN_ENABLE_OVNKUBE_IDENTITY @@ -620,6 +628,9 @@ spec: - name: host-var-lib-ovs hostPath: path: /var/lib/openvswitch + - name: run-systemd + hostPath: + path: /run/systemd tolerations: - operator: "Exists" diff --git a/dist/templates/ovnkube-zone-controller.yaml.j2 b/dist/templates/ovnkube-zone-controller.yaml.j2 index 5fa5b104c0..d5cb2a1282 100644 --- a/dist/templates/ovnkube-zone-controller.yaml.j2 +++ b/dist/templates/ovnkube-zone-controller.yaml.j2 @@ -345,6 +345,8 @@ spec: value: "{{ ovn_multi_network_enable }}" - name: OVN_NETWORK_SEGMENTATION_ENABLE value: "{{ ovn_network_segmentation_enable }}" + - name: OVN_ROUTE_ADVERTISEMENTS_ENABLE + value: "{{ ovn_route_advertisements_enable }}" - name: OVN_HYBRID_OVERLAY_NET_CIDR value: "{{ ovn_hybrid_overlay_net_cidr }}" - name: OVN_DISABLE_SNAT_MULTIPLE_GWS @@ -386,6 +388,8 @@ spec: value: "local" - name: OVN_ENABLE_DNSNAMERESOLVER value: "{{ ovn_enable_dnsnameresolver }}" + - name: OVN_OBSERV_ENABLE + value: "{{ ovn_observ_enable }}" # end of container volumes: diff --git a/dist/templates/rbac-ovnkube-cluster-manager.yaml.j2 b/dist/templates/rbac-ovnkube-cluster-manager.yaml.j2 index c7031b8be1..b658630b28 100644 --- a/dist/templates/rbac-ovnkube-cluster-manager.yaml.j2 +++ b/dist/templates/rbac-ovnkube-cluster-manager.yaml.j2 @@ -74,6 +74,8 @@ rules: - egressfirewalls - egressqoses - userdefinednetworks + - clusteruserdefinednetworks + - routeadvertisements verbs: [ "get", "list", "watch" ] - apiGroups: ["k8s.ovn.org"] resources: @@ -81,6 +83,10 @@ rules: - egressservices/status - userdefinednetworks - userdefinednetworks/status + - clusteruserdefinednetworks + - clusteruserdefinednetworks/status + - clusteruserdefinednetworks/finalizers + - routeadvertisements/status verbs: [ "patch", "update" ] - apiGroups: [""] resources: diff --git 
a/dist/templates/rbac-ovnkube-master.yaml.j2 b/dist/templates/rbac-ovnkube-master.yaml.j2 index 2d78c5dbd7..119f085657 100644 --- a/dist/templates/rbac-ovnkube-master.yaml.j2 +++ b/dist/templates/rbac-ovnkube-master.yaml.j2 @@ -84,6 +84,8 @@ rules: - egressservices - adminpolicybasedexternalroutes - userdefinednetworks + - clusteruserdefinednetworks + - routeadvertisements verbs: [ "get", "list", "watch" ] - apiGroups: ["k8s.cni.cncf.io"] resources: @@ -115,6 +117,10 @@ rules: - egressqoses/status - userdefinednetworks - userdefinednetworks/status + - clusteruserdefinednetworks + - clusteruserdefinednetworks/status + - clusteruserdefinednetworks/finalizers + - routeadvertisements/status verbs: [ "patch", "update" ] - apiGroups: [""] resources: diff --git a/dist/templates/rbac-ovnkube-node.yaml.j2 b/dist/templates/rbac-ovnkube-node.yaml.j2 index fe30d3440b..1e9e413c27 100644 --- a/dist/templates/rbac-ovnkube-node.yaml.j2 +++ b/dist/templates/rbac-ovnkube-node.yaml.j2 @@ -162,6 +162,7 @@ rules: - egressfirewalls/status - adminpolicybasedexternalroutes/status - egressqoses/status + - routeadvertisements/status verbs: [ "patch", "update" ] - apiGroups: ["policy.networking.k8s.io"] resources: @@ -181,6 +182,9 @@ rules: - egressqoses - egressservices - adminpolicybasedexternalroutes + - userdefinednetworks + - clusteruserdefinednetworks + - routeadvertisements verbs: [ "get", "list", "watch" ] {% if ovn_enable_ovnkube_identity == "true" -%} - apiGroups: ["certificates.k8s.io"] diff --git a/docs/api-reference/introduction.md b/docs/api-reference/introduction.md index 58b9b97206..d976372579 100644 --- a/docs/api-reference/introduction.md +++ b/docs/api-reference/introduction.md @@ -37,3 +37,4 @@ designed and implemented by OVN-Kubernetes * [EgressQoS](https://ovn-kubernetes.io/api-reference/egress-qos-api-spec/) * [EgressFirewall](https://ovn-kubernetes.io/api-reference/egress-firewall-api-spec/) * [AdminPolicyBasedExternalRoutes](https://ovn-kubernetes.io/api-reference/admin-epbr-api-spec/) +* [UserDefinedNetwork](https://ovn-kubernetes.io/api-reference/userdefinednetwork-api-spec/) diff --git a/docs/api-reference/userdefinednetwork-api-spec.md b/docs/api-reference/userdefinednetwork-api-spec.md new file mode 100644 index 0000000000..0ccdda7f06 --- /dev/null +++ b/docs/api-reference/userdefinednetwork-api-spec.md @@ -0,0 +1,322 @@ +# API Reference + +## Packages +- [k8s.ovn.org/v1](#k8sovnorgv1) + + +## k8s.ovn.org/v1 + +Package v1 contains API Schema definitions for the network v1 API group + +### Resource Types +- [ClusterUserDefinedNetwork](#clusteruserdefinednetwork) +- [ClusterUserDefinedNetworkList](#clusteruserdefinednetworklist) +- [UserDefinedNetwork](#userdefinednetwork) +- [UserDefinedNetworkList](#userdefinednetworklist) + + + +#### CIDR + +_Underlying type:_ _string_ + + + + + +_Appears in:_ +- [DualStackCIDRs](#dualstackcidrs) +- [Layer3Subnet](#layer3subnet) + + + +#### ClusterUserDefinedNetwork + + + +ClusterUserDefinedNetwork describe network request for a shared network across namespaces. + + + +_Appears in:_ +- [ClusterUserDefinedNetworkList](#clusteruserdefinednetworklist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `k8s.ovn.org/v1` | | | +| `kind` _string_ | `ClusterUserDefinedNetwork` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. 
| | | +| `spec` _[ClusterUserDefinedNetworkSpec](#clusteruserdefinednetworkspec)_ | | | Required: \{\}
| +| `status` _[ClusterUserDefinedNetworkStatus](#clusteruserdefinednetworkstatus)_ | | | | + + +#### ClusterUserDefinedNetworkList + + + +ClusterUserDefinedNetworkList contains a list of ClusterUserDefinedNetwork. + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `k8s.ovn.org/v1` | | | +| `kind` _string_ | `ClusterUserDefinedNetworkList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[ClusterUserDefinedNetwork](#clusteruserdefinednetwork) array_ | | | | + + +#### ClusterUserDefinedNetworkSpec + + + +ClusterUserDefinedNetworkSpec defines the desired state of ClusterUserDefinedNetwork. + + + +_Appears in:_ +- [ClusterUserDefinedNetwork](#clusteruserdefinednetwork) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `namespaceSelector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#labelselector-v1-meta)_ | NamespaceSelector Label selector for which namespace network should be available for. | | Required: \{\}
| +| `network` _[NetworkSpec](#networkspec)_ | Network is the user-defined-network spec | | Required: \{\}
| + + +#### ClusterUserDefinedNetworkStatus + + + +ClusterUserDefinedNetworkStatus contains the observed status of the ClusterUserDefinedNetwork. + + + +_Appears in:_ +- [ClusterUserDefinedNetwork](#clusteruserdefinednetwork) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#condition-v1-meta) array_ | Conditions slice of condition objects indicating details about ClusterUserDefineNetwork status. | | | + + +#### DualStackCIDRs + +_Underlying type:_ _[CIDR](#cidr)_ + + + +_Validation:_ +- MaxItems: 2 +- MinItems: 1 + +_Appears in:_ +- [Layer2Config](#layer2config) +- [Layer3Config](#layer3config) + + + +#### Layer2Config + + + + + + + +_Appears in:_ +- [NetworkSpec](#networkspec) +- [UserDefinedNetworkSpec](#userdefinednetworkspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `role` _[NetworkRole](#networkrole)_ | Role describes the network role in the pod.

Allowed value is "Secondary".
Secondary network is only assigned to pods that use `k8s.v1.cni.cncf.io/networks` annotation to select given network. | | Enum: [Primary Secondary]
Required: \{\}
| +| `mtu` _integer_ | MTU is the maximum transmission unit for a network.
MTU is optional, if not provided, the globally configured value in OVN-Kubernetes (defaults to 1400) is used for the network. | | Maximum: 65536
Minimum: 0
| +| `subnets` _[DualStackCIDRs](#dualstackcidrs)_ | Subnets are used for the pod network across the cluster.
Dual-stack clusters may set 2 subnets (one for each IP family), otherwise only 1 subnet is allowed.

The format should match standard CIDR notation (for example, "10.128.0.0/16").
This field may be omitted. In that case the logical switch implementing the network only provides layer 2 communication,
and users must configure IP addresses for the pods. As a consequence, Port security only prevents MAC spoofing. | | MaxItems: 2
MinItems: 1
| +| `joinSubnets` _[DualStackCIDRs](#dualstackcidrs)_ | JoinSubnets are used inside the OVN network topology.

Dual-stack clusters may set 2 subnets (one for each IP family); otherwise, only 1 subnet is allowed.
This field is only allowed for the "Primary" network.
It is not recommended to set this field without explicit need and understanding of the OVN network topology.
When omitted, the platform will choose a reasonable default which is subject to change over time. | | MaxItems: 2
MinItems: 1
| +| `ipamLifecycle` _[NetworkIPAMLifecycle](#networkipamlifecycle)_ | IPAMLifecycle controls the IP address management lifecycle.

The only allowed value is Persistent. When set, IP addresses assigned by OVN-Kubernetes will be persisted in an
`ipamclaims.k8s.cni.cncf.io` object. These IP addresses will be reused by other pods if requested.
Only supported when "subnets" are set. | | Enum: [Persistent]
| + + +#### Layer3Config + + + + + + + +_Appears in:_ +- [NetworkSpec](#networkspec) +- [UserDefinedNetworkSpec](#userdefinednetworkspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `role` _[NetworkRole](#networkrole)_ | Role describes the network role in the pod.

Allowed values are "Primary" and "Secondary".
Primary network is automatically assigned to every pod created in the same namespace.
A Secondary network is only assigned to pods that use the `k8s.v1.cni.cncf.io/networks` annotation to select the given network. | | Enum: [Primary Secondary]
Required: \{\}
| +| `mtu` _integer_ | MTU is the maximum transmission unit for a network.

MTU is optional; if not provided, the globally configured value in OVN-Kubernetes (defaults to 1400) is used for the network. | | Maximum: 65536
Minimum: 0
| +| `subnets` _[Layer3Subnet](#layer3subnet) array_ | Subnets are used for the pod network across the cluster.

Dual-stack clusters may set 2 subnets (one for each IP family); otherwise, only 1 subnet is allowed.
The given subnet is split into smaller subnets for every node. | | MaxItems: 2
MinItems: 1
| +| `joinSubnets` _[DualStackCIDRs](#dualstackcidrs)_ | JoinSubnets are used inside the OVN network topology.

Dual-stack clusters may set 2 subnets (one for each IP family); otherwise, only 1 subnet is allowed.
This field is only allowed for the "Primary" network.
It is not recommended to set this field without explicit need and understanding of the OVN network topology.
When omitted, the platform will choose a reasonable default which is subject to change over time. | | MaxItems: 2
MinItems: 1
| + + +#### Layer3Subnet + + + + + + + +_Appears in:_ +- [Layer3Config](#layer3config) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `cidr` _[CIDR](#cidr)_ | CIDR specifies L3Subnet, which is split into smaller subnets for every node. | | | +| `hostSubnet` _integer_ | HostSubnet specifies the subnet size for every node.

When not set, it will be assigned automatically. | | Maximum: 127
Minimum: 1
| + + +#### NetworkIPAMLifecycle + +_Underlying type:_ _string_ + + + +_Validation:_ +- Enum: [Persistent] + +_Appears in:_ +- [Layer2Config](#layer2config) + +| Field | Description | +| --- | --- | +| `Persistent` | | + + +#### NetworkRole + +_Underlying type:_ _string_ + + + +_Validation:_ +- Enum: [Primary Secondary] + +_Appears in:_ +- [Layer2Config](#layer2config) +- [Layer3Config](#layer3config) + +| Field | Description | +| --- | --- | +| `Primary` | | +| `Secondary` | | + + +#### NetworkSpec + + + +NetworkSpec defines the desired state of the network. + + + +_Appears in:_ +- [ClusterUserDefinedNetworkSpec](#clusteruserdefinednetworkspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `topology` _[NetworkTopology](#networktopology)_ | Topology describes network configuration.

Allowed values are "Layer3", "Layer2".
Layer3 topology creates a layer 2 segment per node, each with a different subnet. Layer 3 routing is used to interconnect node subnets.
Layer2 topology creates one logical switch shared by all nodes. | | Enum: [Layer2 Layer3]
Required: \{\}
| +| `layer3` _[Layer3Config](#layer3config)_ | Layer3 is the Layer3 topology configuration. | | | +| `layer2` _[Layer2Config](#layer2config)_ | Layer2 is the Layer2 topology configuration. | | | + + +#### NetworkTopology + +_Underlying type:_ _string_ + + + +_Validation:_ +- Enum: [Layer2 Layer3] + +_Appears in:_ +- [NetworkSpec](#networkspec) +- [UserDefinedNetworkSpec](#userdefinednetworkspec) + +| Field | Description | +| --- | --- | +| `Layer2` | | +| `Layer3` | | + + +#### UserDefinedNetwork + + + +UserDefinedNetwork describes a network request for a Namespace. + + + +_Appears in:_ +- [UserDefinedNetworkList](#userdefinednetworklist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `k8s.ovn.org/v1` | | | +| `kind` _string_ | `UserDefinedNetwork` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[UserDefinedNetworkSpec](#userdefinednetworkspec)_ | | | Required: \{\}
| +| `status` _[UserDefinedNetworkStatus](#userdefinednetworkstatus)_ | | | | + + +#### UserDefinedNetworkList + + + +UserDefinedNetworkList contains a list of UserDefinedNetwork. + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `k8s.ovn.org/v1` | | | +| `kind` _string_ | `UserDefinedNetworkList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[UserDefinedNetwork](#userdefinednetwork) array_ | | | | + + +#### UserDefinedNetworkSpec + + + +UserDefinedNetworkSpec defines the desired state of UserDefinedNetwork. + + + +_Appears in:_ +- [UserDefinedNetwork](#userdefinednetwork) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `topology` _[NetworkTopology](#networktopology)_ | Topology describes network configuration.

Allowed values are "Layer3", "Layer2".
Layer3 topology creates a layer 2 segment per node, each with a different subnet. Layer 3 routing is used to interconnect node subnets.
Layer2 topology creates one logical switch shared by all nodes. | | Enum: [Layer2 Layer3]
Required: \{\}
| +| `layer3` _[Layer3Config](#layer3config)_ | Layer3 is the Layer3 topology configuration. | | | +| `layer2` _[Layer2Config](#layer2config)_ | Layer2 is the Layer2 topology configuration. | | | + + +#### UserDefinedNetworkStatus + + + +UserDefinedNetworkStatus contains the observed status of the UserDefinedNetwork. + + + +_Appears in:_ +- [UserDefinedNetwork](#userdefinednetwork) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#condition-v1-meta) array_ | | | | + + diff --git a/docs/ci/ci.md b/docs/ci/ci.md index 66a4857e37..394a4a0f01 100644 --- a/docs/ci/ci.md +++ b/docs/ci/ci.md @@ -119,7 +119,7 @@ and set the environmental variable `K8S_VERSION` to the same value. Also make su your go directory with `export GOPATH=(...)`. ``` -K8S_VERSION=v1.30.2 +K8S_VERSION=v1.31.0 git clone --single-branch --branch $K8S_VERSION https://github.com/kubernetes/kubernetes.git $GOPATH/src/k8s.io/kubernetes/ pushd $GOPATH/src/k8s.io/kubernetes/ make WHAT="test/e2e/e2e.test vendor/github.com/onsi/ginkgo/ginkgo cmd/kubectl" diff --git a/docs/design/service-traffic-policy.md b/docs/design/service-traffic-policy.md index 544eed0c6a..f5a7757709 100644 --- a/docs/design/service-traffic-policy.md +++ b/docs/design/service-traffic-policy.md @@ -323,7 +323,7 @@ vips : {"10.96.115.103:80"="172.19.0.3:8080", "172.19.0.3:31339"= 9. The routes in the host send this back to breth0: ``` -169.254.169.1 dev breth0 src 172.19.0.4 mtu 1400 +169.254.169.1 dev breth0 src 172.19.0.3 mtu 1400 ``` 10. Traffic leaves to primary interface from breth0: @@ -359,7 +359,7 @@ cookie=0xdeff105, duration=3189.786s, table=0, n_packets=99979, n_bytes=29802921 4. From OVN it gets sent back to host and then back from host into breth0 and into the wire: ``` - cookie=0xdeff105, duration=2334.510s, table=0, n_packets=18, n_bytes=1452, priority=175,ip,in_port="patch-breth0_ov",nw_src=172.19.0.4 actions=ct(table=4,zone=64001,nat) + cookie=0xdeff105, duration=2334.510s, table=0, n_packets=18, n_bytes=1452, priority=175,ip,in_port="patch-breth0_ov",nw_src=172.19.0.3 actions=ct(table=4,zone=64001,nat) cookie=0xdeff105, duration=2334.510s, table=4, n_packets=18, n_bytes=1452, ip actions=ct(commit,table=3,zone=64002,nat(src=169.254.169.1)) cookie=0xdeff105, duration=0.365s, table=3, n_packets=32, n_bytes=2808, actions=move:NXM_OF_ETH_DST[]->NXM_OF_ETH_SRC[],set_field:02:42:ac:13:00:03->eth_dst,LOCAL cookie=0xdeff105, duration=2334.510s, table=0, n_packets=7611, n_bytes=754388, priority=100,ip,in_port=LOCAL actions=ct(commit,zone=64000,exec(load:0x2->NXM_NX_CT_MARK[])),output:eth0 @@ -374,17 +374,17 @@ NOTE: We have added a masquerade rule to iptable rules to SNAT towards the netIP tcpdump: ``` SYN: -13:38:52.988279 eth0 In ifindex 19 02:42:df:4d:b6:d2 ethertype IPv4 (0x0800), length 80: 172.19.0.1.36363 > 172.19.0.4.30950: Flags [S], seq 3548868802, win 64240, options [mss 1460,sackOK,TS val 1854443570 ecr 0,nop,wscale 7], length 0 -13:38:52.988315 breth0 In ifindex 6 02:42:df:4d:b6:d2 ethertype IPv4 (0x0800), length 80: 172.19.0.1.36363 > 172.19.0.4.30950: Flags [S], seq 3548868802, win 64240, options [mss 1460,sackOK,TS val 1854443570 ecr 0,nop,wscale 7], length 0 +13:38:52.988279 eth0 In ifindex 19 02:42:df:4d:b6:d2 ethertype IPv4 (0x0800), length 80: 172.19.0.1.36363 > 172.19.0.3.30950: Flags [S], seq 3548868802, win 64240, options [mss 1460,sackOK,TS val 1854443570 ecr 0,nop,wscale 7], length 0 +13:38:52.988315 breth0 In 
ifindex 6 02:42:df:4d:b6:d2 ethertype IPv4 (0x0800), length 80: 172.19.0.1.36363 > 172.19.0.3.30950: Flags [S], seq 3548868802, win 64240, options [mss 1460,sackOK,TS val 1854443570 ecr 0,nop,wscale 7], length 0 13:38:52.988357 breth0 Out ifindex 6 02:42:ac:13:00:04 ethertype IPv4 (0x0800), length 80: 172.19.0.1.36363 > 10.96.211.228.80: Flags [S], seq 3548868802, win 64240, options [mss 1460,sackOK,TS val 1854443570 ecr 0,nop,wscale 7], length 0 -13:38:52.989240 breth0 In ifindex 6 02:42:ac:13:00:03 ethertype IPv4 (0x0800), length 80: 169.254.169.1.36363 > 172.19.0.3.8080: Flags [S], seq 3548868802, win 64240, options [mss 1460,sackOK,TS val 1854443570 ecr 0,nop,wscale 7], length 0 -13:38:52.989240 breth0 In ifindex 6 02:42:ac:13:00:03 ethertype IPv4 (0x0800), length 80: 172.19.0.4.31991 > 172.19.0.3.8080: Flags [S], seq 3548868802, win 64240, options [mss 1460,sackOK,TS val 1854443570 ecr 0,nop,wscale 7], length 0 +13:38:52.989240 breth0 In ifindex 6 02:42:ac:13:00:03 ethertype IPv4 (0x0800), length 80: 169.254.169.1.36363 > 172.19.0.4.8080: Flags [S], seq 3548868802, win 64240, options [mss 1460,sackOK,TS val 1854443570 ecr 0,nop,wscale 7], length 0 +13:38:52.989240 breth0 In ifindex 6 02:42:ac:13:00:03 ethertype IPv4 (0x0800), length 80: 172.19.0.3.31991 > 172.19.0.4.8080: Flags [S], seq 3548868802, win 64240, options [mss 1460,sackOK,TS val 1854443570 ecr 0,nop,wscale 7], length 0 SYNACK: -13:38:52.989515 breth0 Out ifindex 6 02:42:ac:13:00:04 ethertype IPv4 (0x0800), length 80: 172.19.0.3.8080 > 172.19.0.4.31991: Flags [S.], seq 3406651567, ack 3548868803, win 65160, options [mss 1460,sackOK,TS val 2294391439 ecr 1854443570,nop,wscale 7], length 0 -13:38:52.989515 breth0 Out ifindex 6 02:42:ac:13:00:04 ethertype IPv4 (0x0800), length 80: 172.19.0.3.8080 > 169.254.169.1.36363: Flags [S.], seq 3406651567, ack 3548868803, win 65160, options [mss 1460,sackOK,TS val 2294391439 ecr 1854443570,nop,wscale 7], length 0 +13:38:52.989515 breth0 Out ifindex 6 02:42:ac:13:00:04 ethertype IPv4 (0x0800), length 80: 172.19.0.4.8080 > 172.19.0.3.31991: Flags [S.], seq 3406651567, ack 3548868803, win 65160, options [mss 1460,sackOK,TS val 2294391439 ecr 1854443570,nop,wscale 7], length 0 +13:38:52.989515 breth0 Out ifindex 6 02:42:ac:13:00:04 ethertype IPv4 (0x0800), length 80: 172.19.0.4.8080 > 169.254.169.1.36363: Flags [S.], seq 3406651567, ack 3548868803, win 65160, options [mss 1460,sackOK,TS val 2294391439 ecr 1854443570,nop,wscale 7], length 0 13:38:52.989562 breth0 In ifindex 6 0a:58:a9:fe:a9:04 ethertype IPv4 (0x0800), length 80: 10.96.211.228.80 > 172.19.0.1.36363: Flags [S.], seq 3406651567, ack 3548868803, win 65160, options [mss 1460,sackOK,TS val 2294391439 ecr 1854443570,nop,wscale 7], length 0 -13:38:52.989571 breth0 Out ifindex 6 02:42:ac:13:00:04 ethertype IPv4 (0x0800), length 80: 172.19.0.4.30950 > 172.19.0.1.36363: Flags [S.], seq 3406651567, ack 3548868803, win 65160, options [mss 1460,sackOK,TS val 2294391439 ecr 1854443570,nop,wscale 7], length 0 -13:38:52.989581 eth0 Out ifindex 19 02:42:ac:13:00:04 ethertype IPv4 (0x0800), length 80: 172.19.0.4.30950 > 172.19.0.1.36363: Flags [S.], seq 3406651567, ack 3548868803, win 65160, options [mss 1460,sackOK,TS val 2294391439 ecr 1854443570,nop,wscale 7], length 0 +13:38:52.989571 breth0 Out ifindex 6 02:42:ac:13:00:04 ethertype IPv4 (0x0800), length 80: 172.19.0.3.30950 > 172.19.0.1.36363: Flags [S.], seq 3406651567, ack 3548868803, win 65160, options [mss 1460,sackOK,TS val 2294391439 ecr 1854443570,nop,wscale 7], length 0 
+13:38:52.989581 eth0 Out ifindex 19 02:42:ac:13:00:04 ethertype IPv4 (0x0800), length 80: 172.19.0.3.30950 > 172.19.0.1.36363: Flags [S.], seq 3406651567, ack 3548868803, win 65160, options [mss 1460,sackOK,TS val 2294391439 ecr 1854443570,nop,wscale 7], length 0 ``` diff --git a/docs/features/cluster-egress-controls/egress-ip.md b/docs/features/cluster-egress-controls/egress-ip.md index 6eb81b94d2..d4cbd30b8c 100644 --- a/docs/features/cluster-egress-controls/egress-ip.md +++ b/docs/features/cluster-egress-controls/egress-ip.md @@ -11,7 +11,6 @@ For more info, consider looking at the following links: - [Assigning an egress IP address](https://docs.okd.io/latest/networking/ovn_kubernetes_network_provider/assigning-egress-ips-ovn.html) - [Managing Egress IP in OpenShift 4 with OVN-Kubernetes](https://rcarrata.com/openshift/egress-ip-ovn/) - ## Example An example of EgressIP might look like this: @@ -38,11 +37,16 @@ spec: It specifies to use `172.18.0.33` or `172.18.0.44` egressIP for pods that are labeled with `app: web` that run in a namespace without `environment: development` label. Both selectors use the [generic kubernetes label selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors). -## Traffic flows +## Layer 3 network +Supported network configs: +- Cluster default network +- Role primary user defined networks + +### Egress IP is assigned to the primary host interface If the Egress IP(s) are hosted on the OVN primary network then the implementation is redirecting the POD traffic to an egress node where it is SNATed and sent out. -Using the example EgressIP and a matching pod with `10.244.1.3` IP, the following logical router policies are configured in `ovn_cluster_router`: +Using the example EgressIP and a matching pod attached to the cluster default network with `10.244.1.3` IP, the following logical router policies are configured in `ovn_cluster_router`: ```shell Routing Policies 1004 inport == "rtos-ovn-control-plane" && ip4.dst == 172.18.0.4 /* ovn-control-plane */ reroute 10.244.0.2 @@ -59,7 +63,7 @@ Routing Policies - Rules with `102` priority are added by OVN-Kubernetes when EgressIP feature is enabled, they ensure that east-west traffic is not using egress IPs. - The rule with `100` priority is added for the pod matching `egressip-prod` EgressIP, and it redirects the traffic to one of the egress nodes (ECMP is used to balance the traffic between next hops). -Once the redirected traffic reaches one of the egress nodes it gets SNATed in the gateway router: +For a pod attached to the cluster default network, once the redirected traffic reaches one of the egress nodes, it gets SNATed in the gateway router: ```shell ovn-nbctl lr-nat-list GR_ovn-worker TYPE GATEWAY_PORT EXTERNAL_IP EXTERNAL_PORT LOGICAL_IP EXTERNAL_MAC LOGICAL_PORT @@ -71,6 +75,16 @@ TYPE GATEWAY_PORT EXTERNAL_IP EXTERNAL_PORT LOGIC snat 172.18.0.44 10.244.1.3 ``` +For a pod attached to a role primary user defined network "network1", there is no NAT entry for the pod on the egress OVN gateway; +instead, a logical router policy is attached to the egress node's OVN gateway router: +```shell +sh-5.2# ovn-nbctl lr-policy-list GR_network1_ovn-worker +Routing Policies + 95 ip4.src == 10.128.1.3 && pkt.mark == 0 allow pkt_mark=50006 +``` + +### Egress IP is assigned to a secondary host interface +Note that this is unsupported for user defined networks.
Lets now imagine the Egress IP(s) mentioned previously, are not hosted by the OVN primary network and is hosted by a secondary host network which is assigned to a standard linux interface, a redirect to the egress-able node management port IP address: ```shell @@ -140,6 +154,12 @@ is created within the chain `OVN-KUBE-EGRESS-IP-Multi-NIC` for each selected pod egress-ing a particular interface. The routing table number `1111` is generated from the interface name. Routes within the main routing table who's output interface share the same interface used for Egress IP are also cloned into the VRF 1111. +## Layer 2 network +Not supported + +## Localnet +Not supported + ### Pod to node IP traffic When a cluster networked pod matched by an egress IP tries to connect to a non-local node IP it hits the following logical router policy in `ovn_cluster_router`: diff --git a/docs/features/multiple-networks/multi-homing.md b/docs/features/multiple-networks/multi-homing.md index d361aa1e1b..fd89716516 100644 --- a/docs/features/multiple-networks/multi-homing.md +++ b/docs/features/multiple-networks/multi-homing.md @@ -169,12 +169,66 @@ localnet network. IP addresses in a `ipamclaims.k8s.cni.cncf.io` object. This IP addresses will be reused by other pods if requested. Useful for KubeVirt VMs. Only makes sense if the `subnets` attribute is also defined. +- `physicalNetworkName` (string, optional): the name of the physical network to + which the OVN overlay will connect. When omitted, it will default to the value + of the localnet network `name`. **NOTE** - when the subnets attribute is omitted, the logical switch implementing the network will only provide layer 2 communication, and the users must configure IPs for the pods. Port security will only prevent MAC spoofing. +#### Sharing the same physical network mapping +To prevent the admin from having to reconfigure the cluster nodes whenever they +want to - let's say - add a VLAN, OVN-Kubernetes allows multiple network +overlays to re-use the same physical network mapping. + +To do this, the cluster admin would provision two different networks (with +different VLAN tags) using the **same** physical network name. Please check the +example below for an example of this configuration: +```yaml +--- +apiVersion: k8s.cni.cncf.io/v1 +kind: NetworkAttachmentDefinition +metadata: + name: bluenet + namespace: test +spec: + config: | + { + "cniVersion": "0.3.1", + "name": "tenantblue", + "type": "ovn-k8s-cni-overlay", + "topology": "localnet", + "netAttachDefName": "test/bluenet", + "vlanID": 4000, + "physicalNetworkName": "physnet" + } +--- +apiVersion: k8s.cni.cncf.io/v1 +kind: NetworkAttachmentDefinition +metadata: + name: isolatednet + namespace: test +spec: + config: | + { + "cniVersion": "0.3.1", + "name": "sales", + "type": "ovn-k8s-cni-overlay", + "topology": "localnet", + "netAttachDefName": "test/isolatednet", + "vlanID": 1234, + "physicalNetworkName": "physnet" + } +``` + +> [!WARNING] +> Keep in mind OVN-Kubernetes does **not** validate the physical network +> configurations in any way: the admin must ensure these configurations are +> holistically healthy - e.g. the defined subnets do not overlap, the MTUs make +> sense, etc. + ## Pod configuration The user must specify the secondary network attachments via the `k8s.v1.cni.cncf.io/networks` annotation. @@ -309,8 +363,28 @@ client application that created them in the first place. In this case, KubeVirt. 
This feature is described in detail in the following KubeVirt [design proposal](https://github.com/kubevirt/community/pull/279). +## IPv4 and IPv6 dynamic configuration for virtualization workloads on L2 primary UDN +For virtualization workloads using a primary UDN with layer2 topology, ovn-k +configures some DHCP and NDP flows to serve IPv4 and IPv6 configuration to them. + +For both IPv4 and IPv6, the following parameters are configured using DHCP or RAs: +- address +- gateway +- dns (read notes below) +- hostname (the VM's name) +- mtu (taken from network attachment definition) + +### Configuring the DNS server +By default, the DHCP server in ovn-kubernetes will configure the kubernetes +default dns service `kube-system/kube-dns` as the name server. This can be +overridden with the following command line options (see the sketch below): +- dns-service-namespace +- dns-service-name +
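+A minimal sketch of overriding the DNS service on the `ovnkube` command line
+(the flag spelling follows the option names above; the `custom-dns` service in
+the `dns-tests` namespace is purely illustrative):
+```shell
+ovnkube ... --dns-service-namespace=dns-tests --dns-service-name=custom-dns
+```
+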
## Limitations OVN-K currently does **not** support: - the same attachment configured multiple times in the same pod - i.e. `k8s.v1.cni.cncf.io/networks: l3-network,l3-network` is invalid. - updates to the network selection elements lists - i.e. `k8s.v1.cni.cncf.io/networks` annotation +- IPv6 link-local addresses not derived from the MAC address as described in RFC 2373, like Privacy Extensions defined by RFC 4941, + or the Opaque Identifier generation methods defined in RFC 7217. diff --git a/docs/features/network-security-controls/admin-network-policy.md b/docs/features/network-security-controls/admin-network-policy.md index dbf895cd40..7b40b68842 100644 --- a/docs/features/network-security-controls/admin-network-policy.md +++ b/docs/features/network-security-controls/admin-network-policy.md @@ -164,10 +164,10 @@ spec: to: - pods: namespaceSelector: - matchlabels: + matchLabels: kubernetes.io/metadata.name: kube-system podSelector: - matchlabels: + matchLabels: app: dns ports: - portNumber: @@ -188,7 +188,7 @@ spec: action: "Allow" to: - namespaces: - matchlabels: + matchLabels: tenant: splunk ports: - portNumber: @@ -280,14 +280,12 @@ spec: - name: "default-deny" action: "Deny" from: - - namespaces: - namespaceSelector: {} + - namespaces: {} egress: - name: "default-deny" action: "Deny" to: - - namespaces: - namespaceSelector: {} + - namespaces: {} ``` You can do `oc apply -f default.yaml` to create this ANP in your cluster diff --git a/docs/images/ovnkube-observ.png b/docs/images/ovnkube-observ.png new file mode 100644 index 0000000000..c8644d368b Binary files /dev/null and b/docs/images/ovnkube-observ.png differ diff --git a/docs/installation/launching-ovn-kubernetes-on-kind.md b/docs/installation/launching-ovn-kubernetes-on-kind.md index f4a5a86eef..8309349d89 100644 --- a/docs/installation/launching-ovn-kubernetes-on-kind.md +++ b/docs/installation/launching-ovn-kubernetes-on-kind.md @@ -370,14 +370,14 @@ sudo ln -s /usr/bin/kubectl-v1.17.3 /usr/bin/kubectl Download and install latest version of `kubectl`: ``` -$ K8S_VERSION=v1.30.2 +$ K8S_VERSION=v1.31.0 $ curl -LO https://storage.googleapis.com/kubernetes-release/release/$K8S_VERSION/bin/linux/amd64/kubectl $ chmod +x kubectl $ sudo mv kubectl /usr/bin/kubectl-$K8S_VERSION $ sudo rm /usr/bin/kubectl $ sudo ln -s /usr/bin/kubectl-$K8S_VERSION /usr/bin/kubectl $ kubectl version --client -Client Version: v1.30.2 +Client Version: v1.31.0 Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3 ``` @@ -426,7 +426,7 @@ $ cd ../dist/images/ $ make fedora $ cd ../../contrib/ -$ KIND_IPV4_SUPPORT=true KIND_IPV6_SUPPORT=true K8S_VERSION=v1.30.2 ./kind.sh +$ KIND_IPV4_SUPPORT=true KIND_IPV6_SUPPORT=true K8S_VERSION=v1.31.0 ./kind.sh ``` Once `kind.sh` completes, setup kube config file: @@ -452,7 +452,7 @@ one (or both of) the following variables: ``` $ cd ../../contrib/ -$ KIND_IMAGE=example.com/kindest/node K8S_VERSION=v1.30.2 ./kind.sh +$ KIND_IMAGE=example.com/kindest/node K8S_VERSION=v1.31.0 ./kind.sh ``` ### Using kind local registry to deploy non ovn-k containers diff --git a/docs/observability/ovn-observability.md b/docs/observability/ovn-observability.md new file mode 100644 index 0000000000..0507b9e725 --- /dev/null +++ b/docs/observability/ovn-observability.md @@ -0,0 +1,131 @@ +# Observability + +## Introduction + +The observability feature uses OVN sampling functionality to generate samples with requested metadata when +specific OVS flows are matched. To see the generated samples, a binary called `ovnkube-observ` is used. +This binary allows printing the samples to stdout or writing them to a file. + +Currently, it supports observability for: +- Network Policy +- (Baseline) Admin Network Policy +- Egress firewall +- UDN isolation +- Multicast ACLs + +More features are planned to be added in the future. + +## Motivation + +Networking observability is an important feature to verify the expected networking behavior in a cluster and +to debug existing problems. +Ovn-kubernetes makes use of many abstraction layers (through NBDB, logical flows, openflow flows and datapath flows) +that translate kubernetes features into very specific rules that apply +to each packet in the network. Therefore, even though there are ways to see what OVS/OVN is doing with a particular packet, +there is no way to know why. + +We aim to solve this problem by providing a way for ovn-kubernetes to generate packet samples enriched with metadata +that can be easily correlated back to kubernetes objects or other human-readable pieces of information that provide +insight into what ovn-kubernetes is doing with a packet and why. + +### User-Stories/Use-Cases + +- As a user I want to make sure that the network policies/egress firewalls/etc. are correctly enforced in my cluster. +- As a cluster admin I want to check why some traffic is allowed or dropped. + +## How to enable this feature on an OVN-Kubernetes cluster? + +To enable this feature, use the `--observability` flag with the `kind.sh` script or the `--enable-observability` flag with the `ovnkube` binary. + +To see the samples, use the `ovnkube-observ` binary; use `-h` to see the allowed flags. + +This feature requires OVS 3.4 and Linux kernel 6.11. + +As of Aug 2024, the kernel needs to be built from source; therefore, to try this feature you need to: +- rebuild the kernel with the current master branch from [Linus' tree](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git) + - to rebuild on fedora: https://docs.fedoraproject.org/en-US/quick-docs/kernel-build-custom/#_building_a_vanilla_upstream_kernel +- Build an ovn-kubernetes image that uses the latest OVS/OVN code: +`OVS_BRANCH=main make -C dist/images fedora-dev` +- Start kind with that image, use the `-ov localhost/ovn-kube-fedora-dev:latest` flag with the `kind.sh` script. + +## Workflow Description + +- Observability is enabled by setting the `--enable-observability` flag in the `ovnkube` binary. +- For now all mentioned features are enabled by this flag at the same time. +- The `ovnkube-observ` binary is used to see the samples. Samples are only generated when the real traffic matching the ACLs +is sent through the OVS.
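+
+A minimal invocation might look like this (a sketch, using the flags defined in
+`go-controller/cmd/ovnkube-observ`; the source-IP filter value is illustrative):
+```shell
+ovnkube-observ -filter-src-ip 10.129.2.2
+```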
An example output is: +``` +OVN-K message: Allowed by default allow from local node policy, direction ingress +src=10.129.2.2, dst=10.129.2.5 +``` + +## Implementation Details + +### User facing API Changes + +No API changes were made. + +### OVN sampling details + +OVN has 3 main db tables that are used for sampling: +- `Sample_collector`: This table is used to define the sampling collector. It defines the sampling rate and collectorID, +which is used to set up collectors in the OVS. +- `Sampling_app`: This table is used to set `ID`s for existing OVN sampling applications, which are sent together with the samples. +- `Sample`: This table is used to define required samples and point to the collectors. +Every sample has `Metadata` that is sent together with the sample. + +Samples are attached to the other db tables, for now only to ACLs. +A sample is generated when a packet matches the ACL. Every Sample contains `Sampling_app.ID` and `Sample.Metadata`, +which are decoded by `go-controller/observability-lib`. + +### OVN-Kubernetes Implementation Details + +`Sample_collector` and `Sampling_app` are created or cleaned up when observability is enabled/disabled on startup. +When one of the supported objects (for example, a network policy) is created, ovn-kubernetes generates an nbdb `Sample` for it. + +To decode the samples into human-readable information, `go-controller/observability-lib` is used. It finds the `Sample` +by the attached `Sample.Metadata` and then gets the corresponding db object based on `Sampling_app.ID` and `Sample.UUID`. +The message is then constructed using the db object's `external_ids`. + +### Full stack architecture + +![ovnkube-observ](../images/ovnkube-observ.png) + +The diagram shows how all involved components (kernel, OVS, OVN, ovn-kubernetes) are connected. + +## Best Practices + +TBD + +## Future Items + +Add support for more features, for example, egress IP or load balancing. + +## Known Limitations + +The current version of `ovnkube-observ` only works in OVN-IC mode, as it requires `nbdb` to be available locally via a unix socket. +In the future, non-IC mode will also be supported with a provided `nbdb` address and certificates. + +Only default network observability is supported for now; secondary-network observability will be added later. + +The sample ID for an ACL is stored in conntrack when a new session is established and is never updated until the session is closed. +That means some samples may be removed from nbdb, but still be present in the generated samples. This implies: +- ACL-based sampling only affects newly established connections: if a session was already established before the sampling was enabled, +the session will not be sampled. +- If a session is established with sampling enabled, disabling sampling won't affect that session, and it will continue +generating samples until the session is closed. +- If the sample was removed from nbdb (e.g. when sampling is disabled for a given connection or when an ACL is updated on network policy +update or delete), generated samples won't be decoded, as the required data is not present in nbdb anymore. + +Due to OVN limitations, some samples can only be generated on the first packet of a connection. +This applies to: +- egress firewall, as it doesn't submit a flow to conntrack. +- multiple ACLs in the same direction, as only the last-tier ACL will be submitted to conntrack. For now this applies to + - ANP + network policy + - ANP + BANP + + in both cases ANP will have only a first-packet sample.
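+
+When samples fail to decode because of the conntrack behavior above, inspecting
+the NBDB sampling tables directly can help; a sketch, assuming `ovn-nbctl`
+access to the northbound database on the node:
+```shell
+ovn-nbctl list Sample_collector
+ovn-nbctl list Sampling_app
+ovn-nbctl list Sample
+```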
+ +## References + +NONE diff --git a/go-controller/Makefile b/go-controller/Makefile index 5d73ccd4c2..5465e5f2e7 100644 --- a/go-controller/Makefile +++ b/go-controller/Makefile @@ -22,7 +22,9 @@ else CONTAINER_RUNTIME=docker endif CONTAINER_RUNNABLE ?= $(shell $(CONTAINER_RUNTIME) -v > /dev/null 2>&1; echo $$?) -OVN_SCHEMA_VERSION ?= v24.03.1 +# FIXME(tssurya): In one week when OVN 24.09 is released change the schema version +OVN_SCHEMA_VERSION ?= 8efac26f6637fc +OVS_VERSION ?= v2.17.0 ifeq ($(NOROOT),TRUE) C_ARGS = -e NOROOT=TRUE else @@ -50,7 +52,7 @@ export NOROOT # (disables symbol table and DWARF generation when building ovnk binaries) all build: - hack/build-go.sh cmd/ovnkube cmd/ovn-k8s-cni-overlay cmd/ovn-kube-util hybrid-overlay/cmd/hybrid-overlay-node cmd/ovndbchecker cmd/ovnkube-trace cmd/ovnkube-identity + hack/build-go.sh cmd/ovnkube cmd/ovn-k8s-cni-overlay cmd/ovn-kube-util hybrid-overlay/cmd/hybrid-overlay-node cmd/ovndbchecker cmd/ovnkube-trace cmd/ovnkube-identity cmd/ovnkube-observ windows: WINDOWS_BUILD="yes" hack/build-go.sh hybrid-overlay/cmd/hybrid-overlay-node @@ -64,7 +66,7 @@ else RACE=1 hack/test-go.sh endif -modelgen: pkg/nbdb/ovn-nb.ovsschema pkg/sbdb/ovn-sb.ovsschema +modelgen: pkg/nbdb/ovn-nb.ovsschema pkg/sbdb/ovn-sb.ovsschema pkg/vswitchd/vswitch.ovsschema hack/update-modelgen.sh codegen: @@ -85,6 +87,7 @@ clean: rm -rf ${TEST_REPORT_DIR} rm -f ./pkg/nbdb/ovn-nb.ovsschema rm -f ./pkg/sbdb/ovn-sb.ovsschema + rm -f ./pkg/vswitchd/vswitch.ovsschema .PHONY: lint gofmt @@ -108,6 +111,9 @@ pkg/nbdb/ovn-nb.ovsschema: pkg/sbdb/ovn-sb.ovsschema: curl -sSL https://raw.githubusercontent.com/ovn-org/ovn/$(OVN_SCHEMA_VERSION)/ovn-sb.ovsschema -o $@ +pkg/vswitchd/vswitch.ovsschema: + curl -sSL https://raw.githubusercontent.com/openvswitch/ovs/${OVS_VERSION}/vswitchd/vswitch.ovsschema -o $@ + ${TOOLS_OUTPUT_DIR}: mkdir -p ${TOOLS_OUTPUT_DIR} diff --git a/go-controller/cmd/ovn-kube-util/app/ovs-exporter.go b/go-controller/cmd/ovn-kube-util/app/ovs-exporter.go index 5d4e1f65a4..21bcf7199f 100644 --- a/go-controller/cmd/ovn-kube-util/app/ovs-exporter.go +++ b/go-controller/cmd/ovn-kube-util/app/ovs-exporter.go @@ -6,6 +6,7 @@ import ( "net/http" "time" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -13,6 +14,8 @@ import ( kexec "k8s.io/utils/exec" ) +var metricsScrapeInterval int + var OvsExporterCommand = cli.Command{ Name: "ovs-exporter", Usage: "", @@ -21,6 +24,12 @@ var OvsExporterCommand = cli.Command{ Name: "metrics-bind-address", Usage: `The IP address and port for the metrics server to serve on (default ":9310")`, }, + &cli.IntFlag{ + Name: "metrics-interval", + Usage: "The interval in seconds at which ovs metrics are collected", + Value: 30, + Destination: &metricsScrapeInterval, + }, }, Action: func(ctx *cli.Context) error { stopChan := make(chan struct{}) @@ -33,11 +42,17 @@ var OvsExporterCommand = cli.Command{ return err } + // start the ovsdb client for ovs metrics monitoring + ovsClient, err := libovsdb.NewOVSClient(stopChan) + if err != nil { + klog.Errorf("Error initializing ovs client: %v", err) + } + mux := http.NewServeMux() mux.Handle("/metrics", promhttp.Handler()) // register ovs metrics that will be served off of /metrics path - metrics.RegisterStandaloneOvsMetrics(stopChan) + metrics.RegisterStandaloneOvsMetrics(ovsClient, metricsScrapeInterval, 
stopChan) server := &http.Server{Addr: bindAddress, Handler: mux} go func() { diff --git a/go-controller/cmd/ovnkube-observ/ovnkubeobserv.go b/go-controller/cmd/ovnkube-observ/ovnkubeobserv.go new file mode 100644 index 0000000000..1c55dcee0c --- /dev/null +++ b/go-controller/cmd/ovnkube-observ/ovnkubeobserv.go @@ -0,0 +1,41 @@ +package main + +import ( + "context" + "flag" + "fmt" + "os" + "os/signal" + "syscall" + + observ "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib" +) + +func main() { + ctx, cancel := context.WithCancel(context.Background()) + sigc := make(chan os.Signal, 1) + signal.Notify(sigc, + syscall.SIGHUP, + syscall.SIGINT, + syscall.SIGTERM, + syscall.SIGQUIT) + go func() { + <-sigc + fmt.Println("Received a signal, terminating.") + cancel() + }() + enableDecoder := flag.Bool("enable-enrichment", true, "Enrich samples with nbdb data.") + logCookie := flag.Bool("log-cookie", false, "Print raw sample cookie with psample group_id.") + printPacket := flag.Bool("print-full-packet", false, "Print full received packet. When false, only src and dst ips are printed with every sample.") + addOVSCollector := flag.Bool("add-ovs-collector", false, "Add ovs collector to enable sampling. Use with caution. Make sure no one else is using observability.") + outputFile := flag.String("output-file", "", "Output file to write the samples to.") + filterSrcIP := flag.String("filter-src-ip", "", "Filter in only packets from a given source ip.") + filterDstIP := flag.String("filter-dst-ip", "", "Filter in only packets to a given destination ip.") + flag.Parse() + + reader := observ.NewSampleReader(*enableDecoder, *logCookie, *printPacket, *addOVSCollector, *filterSrcIP, *filterDstIP, *outputFile) + err := reader.ReadSamples(ctx) + if err != nil { + fmt.Println(err.Error()) + } +} diff --git a/go-controller/cmd/ovnkube-trace/ovnkube-trace.go b/go-controller/cmd/ovnkube-trace/ovnkube-trace.go index 9c2548ead1..c985499563 100644 --- a/go-controller/cmd/ovnkube-trace/ovnkube-trace.go +++ b/go-controller/cmd/ovnkube-trace/ovnkube-trace.go @@ -255,28 +255,13 @@ func isRoutingViaHost(coreclient *corev1client.CoreV1Client, restconfig *rest.Co } // getPodMAC returns the pod's MAC address. -func getPodMAC(client *corev1client.CoreV1Client, pod *kapi.Pod) (podMAC string, err error) { - if pod.Spec.HostNetwork { - node, err := client.Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{}) - if err != nil { - return "", err - } - - nodeMAC, err := util.ParseNodeManagementPortMACAddresses(node, types.DefaultNetworkName) - if err != nil { - return "", err - } - if nodeMAC != nil { - podMAC = nodeMAC.String() - } - } else { - podAnnotation, err := util.UnmarshalPodAnnotation(pod.ObjectMeta.Annotations, types.DefaultNetworkName) - if err != nil { - return "", err - } - if podAnnotation != nil { - podMAC = podAnnotation.MAC.String() - } +func getPodMAC(pod *kapi.Pod) (podMAC string, err error) { + podAnnotation, err := util.UnmarshalPodAnnotation(pod.ObjectMeta.Annotations, types.DefaultNetworkName) + if err != nil { + return "", err + } + if podAnnotation != nil { + podMAC = podAnnotation.MAC.String() } return podMAC, nil @@ -485,18 +470,30 @@ func getPodInfo(coreclient *corev1client.CoreV1Client, restconfig *rest.Config, return nil, err } - // Get the pod's MAC address. 
- podInfo.MAC, err = getPodMAC(coreclient, pod) - if err != nil { - klog.V(1).Infof("Problem obtaining Ethernet address of Pod %s in namespace %s\n", podName, namespace) - return nil, err - } - podInfo, err = getDatabaseURIs(coreclient, restconfig, ovnNamespace, podInfo) if err != nil { klog.Exitf("Failed to get database URIs: %v\n", err) } + // Get the pod's MAC address. + // If hostnetwork, use mp0 mac + if pod.Spec.HostNetwork { + podInfo.OvnK8sMp0PortName = types.K8sMgmtIntfName + portCmd := fmt.Sprintf("ovs-vsctl get Interface %s mac_in_use", podInfo.OvnK8sMp0PortName) + localOutput, localError, err := execInPod(coreclient, restconfig, ovnNamespace, podInfo.OvnKubePodName, podInfo.OvnKubeContainerName, portCmd, "") + if err != nil { + return nil, fmt.Errorf("execInPod() failed. err: %s, stderr: %s, stdout: %s, podInfo: %v", err, localError, localOutput, podInfo) + } + localOutput = strings.ReplaceAll(localOutput, "\n", "") + podInfo.MAC = strings.ReplaceAll(localOutput, "\"", "") + } else { + podInfo.MAC, err = getPodMAC(pod) + if err != nil { + klog.V(1).Infof("Problem obtaining Ethernet address of Pod %s in namespace %s\n", podName, namespace) + return nil, err + } + } + // Find rtos MAC (this is the pod's first hop router). podInfo.RtosMAC, err = getRouterPortMacAddress(coreclient, restconfig, podInfo, ovnNamespace, types.RouterToSwitchPrefix) if err != nil { diff --git a/go-controller/cmd/ovnkube/ovnkube.go b/go-controller/cmd/ovnkube/ovnkube.go index b441d38989..aab9e51509 100644 --- a/go-controller/cmd/ovnkube/ovnkube.go +++ b/go-controller/cmd/ovnkube/ovnkube.go @@ -287,7 +287,14 @@ func startOvnKube(ctx *cli.Context, cancel context.CancelFunc) error { }() if config.Kubernetes.BootstrapKubeconfig != "" { - if err := util.StartNodeCertificateManager(ctx.Context, ovnKubeStartWg, os.Getenv("K8S_NODE"), &config.Kubernetes); err != nil { + // In the case of dpus K8S_NODE will be set to dpu host's name + var csrNodeName string + if config.OvnKubeNode.Mode == types.NodeModeDPU { + csrNodeName = os.Getenv("K8S_NODE_DPU") + } else { + csrNodeName = os.Getenv("K8S_NODE") + } + if err := util.StartNodeCertificateManager(ctx.Context, ovnKubeStartWg, csrNodeName, &config.Kubernetes); err != nil { return fmt.Errorf("failed to start the node certificate manager: %w", err) } } @@ -567,10 +574,18 @@ func runOvnKube(ctx context.Context, runMode *ovnkubeRunMode, ovnClientset *util // start the prometheus server to serve OVS and OVN Metrics (default port: 9476) // Note: for ovnkube node mode dpu-host no metrics is required as ovs/ovn is not running on the node. 
if config.OvnKubeNode.Mode != types.NodeModeDPUHost && config.Metrics.OVNMetricsBindAddress != "" { + metricsScrapeInterval := 30 + defer cancel() + + ovsClient, err := libovsdb.NewOVSClient(ctx.Done()) + if err != nil { + return fmt.Errorf("failed to initialize libovsdb vswitchd client: %w", err) + } if config.Metrics.ExportOVSMetrics { - metrics.RegisterOvsMetricsWithOvnMetrics(ctx.Done()) + metrics.RegisterOvsMetricsWithOvnMetrics(ovsClient, metricsScrapeInterval, ctx.Done()) } - metrics.RegisterOvnMetrics(ovnClientset.KubeClient, runMode.identity, ctx.Done()) + metrics.RegisterOvnMetrics(ovnClientset.KubeClient, runMode.identity, + ovsClient, metricsScrapeInterval, ctx.Done()) metrics.StartOVNMetricsServer(config.Metrics.OVNMetricsBindAddress, config.Metrics.NodeServerCert, config.Metrics.NodeServerPrivKey, ctx.Done(), wg) } diff --git a/go-controller/go.mod b/go-controller/go.mod index 537a2e0a13..a5b34faa02 100644 --- a/go-controller/go.mod +++ b/go-controller/go.mod @@ -7,15 +7,17 @@ require ( github.com/alexflint/go-filemutex v1.2.0 github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d github.com/bhendo/go-powershell v0.0.0-20190719160123-219e7fb4e41e - github.com/cenkalti/backoff/v4 v4.2.1 + github.com/cenkalti/backoff/v4 v4.3.0 github.com/containernetworking/cni v1.1.2 github.com/containernetworking/plugins v1.2.0 github.com/coreos/go-iptables v0.6.0 + github.com/coreos/go-systemd/v22 v22.5.0 github.com/fsnotify/fsnotify v1.7.0 github.com/gaissmai/cidrtree v0.1.4 - github.com/go-logr/logr v1.4.1 + github.com/go-logr/logr v1.4.2 github.com/go-logr/stdr v1.2.2 github.com/google/go-cmp v0.6.0 + github.com/google/gopacket v1.1.19 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.0 github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47 @@ -27,65 +29,67 @@ require ( github.com/mdlayher/ndp v1.0.1 github.com/miekg/dns v1.1.31 github.com/mitchellh/copystructure v1.2.0 - github.com/onsi/ginkgo v1.16.5 - github.com/onsi/gomega v1.32.0 + github.com/onsi/ginkgo/v2 v2.19.0 + github.com/onsi/gomega v1.33.1 github.com/openshift/api v0.0.0-20231120222239-b86761094ee3 github.com/openshift/client-go v0.0.0-20231121143148-910ca30a1a9a - github.com/ovn-org/libovsdb v0.6.1-0.20240125124854-03f787b1a892 - github.com/prometheus/client_golang v1.18.0 - github.com/prometheus/client_model v0.5.0 + github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 + github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_model v0.6.1 github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91 github.com/spf13/afero v1.9.5 - github.com/stretchr/testify v1.8.4 - github.com/urfave/cli/v2 v2.2.0 + github.com/stretchr/testify v1.9.0 + github.com/urfave/cli/v2 v2.27.2 github.com/vishvananda/netlink v1.2.1-beta.2.0.20231024175852-77df5d35f725 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 - golang.org/x/net v0.25.0 + golang.org/x/net v0.26.0 golang.org/x/sync v0.7.0 - golang.org/x/sys v0.20.0 + golang.org/x/sys v0.21.0 golang.org/x/time v0.3.0 - google.golang.org/grpc v1.62.1 + google.golang.org/grpc v1.65.0 google.golang.org/grpc/security/advancedtls v0.0.0-20240425232638-1e8b9b7fc655 - google.golang.org/protobuf v1.33.0 + google.golang.org/protobuf v1.34.2 gopkg.in/fsnotify/fsnotify.v1 v1.4.7 gopkg.in/gcfg.v1 v1.2.3 gopkg.in/natefinch/lumberjack.v2 v2.2.1 - k8s.io/api v0.30.2 - k8s.io/apimachinery v0.30.2 - k8s.io/client-go v0.30.2 - k8s.io/component-helpers v0.30.2 - k8s.io/klog/v2 v2.120.1 - k8s.io/kubernetes v1.30.2 - k8s.io/utils 
v0.0.0-20230726121419-3b25d923346b + k8s.io/api v0.31.1 + k8s.io/apimachinery v0.31.1 + k8s.io/client-go v0.31.1 + k8s.io/component-helpers v0.31.1 + k8s.io/klog/v2 v2.130.1 + k8s.io/kubernetes v1.31.1 + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 kubevirt.io/api v1.0.0-alpha.0 - sigs.k8s.io/controller-runtime v0.18.4 + sigs.k8s.io/controller-runtime v0.19.0 + sigs.k8s.io/knftables v0.0.18 sigs.k8s.io/network-policy-api v0.1.5 sigs.k8s.io/structured-merge-diff/v4 v4.4.1 + sigs.k8s.io/yaml v1.4.0 ) -// OCPHACK -require sigs.k8s.io/knftables v0.0.16 - require ( github.com/Microsoft/go-winio v0.6.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/hub v1.0.1 // indirect github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -94,50 +98,48 @@ require ( github.com/juju/errors v0.0.0-20200330140219-3fe23663418f // indirect github.com/juju/testing v0.0.0-20200706033705-4c23f9c453cd // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 // indirect github.com/mdlayher/packet v1.0.0 // indirect github.com/mdlayher/socket v0.2.1 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/spdystream v0.4.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/nxadm/tail v1.4.8 // indirect github.com/openshift/custom-resource-status v1.1.2 // indirect github.com/pborman/uuid v1.2.0 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/common v0.45.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs 
v0.15.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/vishvananda/netns v0.0.4 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.23.0 // indirect + golang.org/x/crypto v0.24.0 // indirect golang.org/x/mod v0.17.0 // indirect - golang.org/x/oauth2 v0.16.0 // indirect - golang.org/x/term v0.20.0 // indirect - golang.org/x/text v0.15.0 // indirect - golang.org/x/tools v0.21.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/term v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.30.2 // indirect - k8s.io/component-base v0.30.2 // indirect + k8s.io/apiextensions-apiserver v0.31.1 // indirect + k8s.io/component-base v0.31.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect kubevirt.io/containerized-data-importer-api v1.55.0 // indirect kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/yaml v1.4.0 // indirect ) replace ( diff --git a/go-controller/go.sum b/go-controller/go.sum index 66e5cc4788..ec0da37bb6 100644 --- a/go-controller/go.sum +++ b/go-controller/go.sum @@ -112,8 +112,8 @@ github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0Bsq github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenk/hub v1.0.1 h1:RBwXNOF4a8KjD8BJ08XqN8KbrqaGiQLDrgvUGJSHuPA= github.com/cenk/hub v1.0.1/go.mod h1:rJM1LNAW0ppT8FMMuPK6c2NP/R2nH/UthtuRySSaf6Y= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cenkalti/hub v1.0.1 h1:UMtjc6dHSaOQTO15SVA50MBIR9zQwvsukQupDrkIRtg= github.com/cenkalti/hub v1.0.1/go.mod h1:tcYwtS3a2d9NO/0xDXVJWx3IedurUjYCqFCmpi0lpHs= github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 h1:CNwZyGS6KpfaOWbh2yLkSy3rSTUh3jub9CzpFpP6PVQ= @@ -121,8 +121,8 @@ github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984/go.mod h1:v2npkhrXyk github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 
v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -224,12 +224,14 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= @@ -238,8 +240,9 @@ github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod 
h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
@@ -286,6 +289,8 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
 github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
+github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/gaissmai/cidrtree v0.1.4 h1:/aYnv1LIwjtSDHNr1eNN99WJeh6vLrB+Sgr1tRMhHDc=
 github.com/gaissmai/cidrtree v0.1.4/go.mod h1:nrjEeeMZmvoJpLcSvZ3qIVFxw/+9GHKi7wDHHmHKGRI=
 github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
@@ -304,8 +309,8 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
@@ -330,15 +335,19 @@ github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
 github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
+github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
 github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
 github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
+github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
 github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
@@ -401,6 +410,8 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
+github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -415,8 +426,8 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
+github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -544,8 +555,6 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
 github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
 github.com/mdlayher/arp v0.0.0-20220512170110-6706a2966875 h1:ql8x//rJsHMjS+qqEag8n3i4azw1QneKh5PieH9UEbY=
 github.com/mdlayher/arp v0.0.0-20220512170110-6706a2966875/go.mod h1:kfOoFJuHWp76v1RgZCb9/gVUc7XdY877S2uVYbNliGc=
 github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 h1:2oDp6OOhLxQ9JBoUuysVz9UZ9uI6oLUbvAZu0x8o+vE=
@@ -568,8 +577,9 @@ github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQ
 github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
 github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
 github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
-github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
 github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8=
+github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
 github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
 github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
 github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
@@ -608,8 +618,8 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
 github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
 github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8=
-github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
+github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
+github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
 github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -618,8 +628,8 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y
 github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
 github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
 github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
-github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk=
-github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg=
+github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
+github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
 github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -643,8 +653,8 @@ github.com/openshift/client-go v0.0.0-20231121143148-910ca30a1a9a h1:4FVrw8hz0Wb
 github.com/openshift/client-go v0.0.0-20231121143148-910ca30a1a9a/go.mod h1:arApQobmOjZqtxw44TwnQdUCH+t9DgZ8geYPFqksHws=
 github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4=
 github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA=
-github.com/ovn-org/libovsdb v0.6.1-0.20240125124854-03f787b1a892 h1:/yg3/z+RH+iDLMxp6FTnmlk5bStK542/Rge5EBjnA9A=
-github.com/ovn-org/libovsdb v0.6.1-0.20240125124854-03f787b1a892/go.mod h1:LC5DOvcY58jOG3HTvDyCVidoMJDurPeu+xlxv5Krd9Q=
+github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 h1:OoDvzyaK7F/ZANIIFOgb4Haj7mye3Hle0fYZZNdidSs=
+github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20/go.mod h1:dJbxEaalQl83nn904K32FaMjlH/qOObZ0bj4ejQ78AI=
 github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
 github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
@@ -656,8 +666,9 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
 github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@@ -665,23 +676,23 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
-github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
 github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
-github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
 github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
-github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -692,13 +703,13 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -715,8 +726,8 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
-github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -744,8 +755,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
 github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -756,8 +768,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -770,8 +782,8 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
 github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
 github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli/v2 v2.2.0 h1:JTTnM6wKzdA0Jqodd966MVj4vWbbquZykeX1sKbe2C4=
-github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ=
+github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI=
+github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y42x+4eM=
 github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
 github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
 github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
@@ -784,11 +796,15 @@ github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1Y
 github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
 github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
 github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
 github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw=
+github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk=
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -796,7 +812,6 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
 github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
 github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
@@ -840,10 +855,9 @@ golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
-golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -881,7 +895,6 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
 golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -939,9 +952,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
-golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
+golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -951,8 +963,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
-golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
+golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
+golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -964,7 +976,6 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
 golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1053,17 +1064,15 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
-golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
-golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
+golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
 golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1074,9 +1083,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
-golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1145,9 +1153,8 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
 golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
 golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
-golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1181,8 +1188,6 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww
 google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
 google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
 google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
 google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -1224,8 +1229,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
 google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1246,8 +1251,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
 google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
-google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
+google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
+google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
 google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b h1:NuxyvVZoDfHZwYW9LD4GJiF5/nhiSyP4/InTrvw9Ibk=
 google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b/go.mod h1:IBqQ7wSUJ2Ep09a8rMWFsg4fmI2r38zwsq8a0GgxXpM=
 google.golang.org/grpc/security/advancedtls v0.0.0-20240425232638-1e8b9b7fc655 h1:m116OZfEvs1iB0qlYNH3M9C+t8eQj3rT+2hzn88UWnU=
@@ -1265,8 +1270,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
 gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1279,6 +1284,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
 gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo=
 gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE=
@@ -1330,17 +1337,17 @@ k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
 k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
 k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
 k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ=
-k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI=
-k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI=
-k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbOocKIE=
-k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw=
+k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU=
+k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI=
+k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40=
+k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ=
 k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig=
 k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
 k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
 k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
 k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
-k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg=
-k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
+k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U=
+k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
 k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
 k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
 k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
@@ -1348,17 +1355,17 @@ k8s.io/client-go v0.18.8/go.mod h1:HqFqMllQ5NnQJNwjro9k5zMyfhZlOwpuTLVrxjkYSxU=
 k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
 k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
 k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
-k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50=
-k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs=
+k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0=
+k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg=
 k8s.io/code-generator v0.18.8/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
 k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
 k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
 k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
 k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
-k8s.io/component-base v0.30.2 h1:pqGBczYoW1sno8q9ObExUqrYSKhtE5rW3y6gX88GZII=
-k8s.io/component-base v0.30.2/go.mod h1:yQLkQDrkK8J6NtP+MGJOws+/PPeEXNpwFixsUI7h/OE=
-k8s.io/component-helpers v0.30.2 h1:kDMYLiWEYeWU7H6jBI+Ua1i2hqNh0DzqDHNIppFC3po=
-k8s.io/component-helpers v0.30.2/go.mod h1:tI0anfS6AbRqooaICkGg7UVAQLedOauVSQW9srDBnJw=
+k8s.io/component-base v0.31.1 h1:UpOepcrX3rQ3ab5NB6g5iP0tvsgJWzxTyAo20sgYSy8=
+k8s.io/component-base v0.31.1/go.mod h1:WGeaw7t/kTsqpVTaCoVEtillbqAhF2/JgvO0LDOMa0w=
+k8s.io/component-helpers v0.31.1 h1:5hZUf3747atdgtR3gPntrG35rC2CkK7rYq2KUraz6Os=
+k8s.io/component-helpers v0.31.1/go.mod h1:ye0Gi8KzFNTfpIuzvVDtxJQMP/0Owkukf1vGf22Hl6U=
 k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
 k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
 k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
@@ -1376,23 +1383,23 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
 k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
 k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
 k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
-k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
 k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
 k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
 k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
-k8s.io/kubernetes v1.30.2 h1:11WhS78OYX/lnSy6TXxPO6Hk+E5K9ZNrEsk9JgMSX8I=
-k8s.io/kubernetes v1.30.2/go.mod h1:yPbIk3MhmhGigX62FLJm+CphNtjxqCvAIFQXup6RKS0=
+k8s.io/kubernetes v1.31.1 h1:1fcYJe8SAhtannpChbmnzHLwAV9Je99PrGaFtBvCxms=
+k8s.io/kubernetes v1.31.1/go.mod h1:/YGPL//Fb9mdv5vukvAQ7Xon+Bqwry52bmjTdORAw+Q=
 k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 k8s.io/utils v0.0.0-20200327001022-6496210b90e8/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 kubevirt.io/api v1.0.0-alpha.0 h1:KltThlM4UY/MMZanpL3nWNA2nA1sS8g4SHKTccNmwhg=
 kubevirt.io/api v1.0.0-alpha.0/go.mod h1:zts/6mioR8vGgvYmQ17Cb9XsUR9e/WjJcdokmrE38wY=
 kubevirt.io/containerized-data-importer-api v1.55.0 h1:IQNc8PYVq1cTwKNPEJza5xSlcnXeYVNt76M5kZ8X7xo=
@@ -1404,13 +1411,13 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw=
-sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg=
+sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q=
+sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
 sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/knftables v0.0.16 h1:ZpTfNsjnidgoXdxxzcZLdSctqkpSO3QB3jo3zQ4PXqM=
-sigs.k8s.io/knftables v0.0.16/go.mod h1:f/5ZLKYEUPUhVjUCg6l80ACdL7CIIyeL0DxfgojGRTk=
+sigs.k8s.io/knftables v0.0.18 h1:6Duvmu0s/HwGifKrtl6G3AyAPYlWiZqTgS8bkVMiyaE=
+sigs.k8s.io/knftables v0.0.18/go.mod h1:f/5ZLKYEUPUhVjUCg6l80ACdL7CIIyeL0DxfgojGRTk=
 sigs.k8s.io/network-policy-api v0.1.5 h1:xyS7VAaM9EfyB428oFk7WjWaCK6B129i+ILUF4C8l6E=
 sigs.k8s.io/network-policy-api v0.1.5/go.mod h1:D7Nkr43VLNd7iYryemnj8qf0N/WjBzTZDxYA+g4u1/Y=
 sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
diff --git a/go-controller/hack/lint.sh b/go-controller/hack/lint.sh
index 71622c26a7..3aefac568e 100755
--- a/go-controller/hack/lint.sh
+++ b/go-controller/hack/lint.sh
@@ -8,7 +8,7 @@ fi
 $1 run --security-opt label=disable --rm \
 	-v ${HOME}/.cache/golangci-lint:/cache -e GOLANGCI_LINT_CACHE=/cache \
-	-v $(pwd):/app -w /app -e GO111MODULE=on golangci/golangci-lint:${VERSION} \
+	-v $(pwd):/app -w /app -e GO111MODULE=on docker.io/golangci/golangci-lint:${VERSION} \
 	golangci-lint run --verbose --print-resources-usage \
 	--modules-download-mode=vendor --timeout=15m0s && \
 	echo "lint OK!"
diff --git a/go-controller/hack/test-go.sh b/go-controller/hack/test-go.sh
index 003b18d15d..fcd094999b 100755
--- a/go-controller/hack/test-go.sh
+++ b/go-controller/hack/test-go.sh
@@ -61,7 +61,7 @@ function testrun {
 	fi
 	if grep -q "ginkgo" ."${path}"/*_test.go; then
 		prefix=$(echo "${path}" | cut -c 2- | sed 's,/,_,g')
-		ginkgoargs="-ginkgo.v ${ginkgo_focus} -ginkgo.reportFile ${TEST_REPORT_DIR}/junit-${prefix}.xml"
+		ginkgoargs="-ginkgo.v ${ginkgo_focus} -ginkgo.junit-report ${TEST_REPORT_DIR}/junit-${prefix}.xml"
 	fi
 	args="${args}${otherargs}"
 	if [ "$go_test" == "gocmd test -mod=vendor" ]; then
@@ -72,7 +72,7 @@ function testrun {
 }
 
 # These packages requires root for network namespace manipulation in unit tests
-root_pkgs=("github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-controller-manager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/iptables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/rulemanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/controllers/egressip")
+root_pkgs=("github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-controller-manager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/iptables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/rulemanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/vrfmanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/controllers/egressip")
 
 # These packages are big and require more than the 10m default to run the unit tests
 big_pkgs=("github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn")
diff --git a/go-controller/hack/update-codegen.sh b/go-controller/hack/update-codegen.sh
index 7c8389946a..efe60cf533 100755
--- a/go-controller/hack/update-codegen.sh
+++ b/go-controller/hack/update-codegen.sh
@@ -13,7 +13,7 @@ SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/..
 olddir="${PWD}"
 builddir="$(mktemp -d)"
 cd "${builddir}"
-GO111MODULE=on go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.14.0
+GO111MODULE=on go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.4
 BINS=(
 	deepcopy-gen
 	applyconfiguration-gen
@@ -21,7 +21,7 @@ BINS=(
 	informer-gen
 	lister-gen
 )
-GO111MODULE=on go install $(printf "k8s.io/code-generator/cmd/%s@release-1.29 " "${BINS[@]}")
+GO111MODULE=on go install $(printf "k8s.io/code-generator/cmd/%s@v0.31.1 " "${BINS[@]}")
 cd "${olddir}"
 if [[ "${builddir}" == /tmp/* ]]; then #paranoia
 	rm -rf "${builddir}"
@@ -31,17 +31,17 @@ for crd in ${crds}; do
 	echo "Generating deepcopy funcs for $crd"
 	deepcopy-gen \
 		--go-header-file hack/boilerplate.go.txt \
-		--input-dirs github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \
-		--output-base "${SCRIPT_ROOT}" \
-		-O zz_generated.deepcopy \
-		--bounding-dirs github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd
+		--output-file zz_generated.deepcopy.go \
+		--bounding-dirs github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd \
+		github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \
+		"$@"
 
 	echo "Generating apply configuration for $crd"
 	applyconfiguration-gen \
 		--go-header-file hack/boilerplate.go.txt \
-		--input-dirs github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \
-		--output-base "${SCRIPT_ROOT}" \
-		--output-package github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/applyconfiguration \
+		--output-dir "${SCRIPT_ROOT}"/pkg/crd/$crd/v1/apis/applyconfiguration \
+		--output-pkg github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/applyconfiguration \
+		github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \
 		"$@"
 
 	echo "Generating clientset for $crd"
@@ -50,43 +50,34 @@ for crd in ${crds}; do
 		--clientset-name "${CLIENTSET_NAME_VERSIONED:-versioned}" \
 		--input-base "" \
 		--input github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \
-		--output-base "${SCRIPT_ROOT}" \
-		--output-package github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/clientset \
+		--output-dir "${SCRIPT_ROOT}"/pkg/crd/$crd/v1/apis/clientset \
+		--output-pkg github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/clientset \
 		--apply-configuration-package github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/applyconfiguration \
-		--plural-exceptions="EgressQoS:EgressQoSes" \
+		--plural-exceptions="EgressQoS:EgressQoSes,RouteAdvertisements:RouteAdvertisements" \
 		"$@"
 
 	echo "Generating listers for $crd"
 	lister-gen \
 		--go-header-file hack/boilerplate.go.txt \
-		--input-dirs github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \
-		--output-base "${SCRIPT_ROOT}" \
-		--output-package github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/listers \
-		--plural-exceptions="EgressQoS:EgressQoSes" \
+		--output-dir "${SCRIPT_ROOT}"/pkg/crd/$crd/v1/apis/listers \
+		--output-pkg github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/listers \
+		--plural-exceptions="EgressQoS:EgressQoSes,RouteAdvertisements:RouteAdvertisements" \
+		github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \
 		"$@"
 
 	echo "Generating informers for $crd"
 	informer-gen \
 		--go-header-file hack/boilerplate.go.txt \
-		--input-dirs github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \
 		--versioned-clientset-package github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/clientset/versioned \
 		--listers-package github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/listers \
-		--output-base "${SCRIPT_ROOT}" \
-		--output-package github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/informers \
-		--plural-exceptions="EgressQoS:EgressQoSes" \
+		--output-dir "${SCRIPT_ROOT}"/pkg/crd/$crd/v1/apis/informers \
+		--output-pkg github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/informers \
+		--plural-exceptions="EgressQoS:EgressQoSes,RouteAdvertisements:RouteAdvertisements" \
+		github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \
 		"$@"
 
-	echo "Copying apis for $crd"
-	rm -rf $SCRIPT_ROOT/pkg/crd/$crd/v1/apis
-	cp -r github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis $SCRIPT_ROOT/pkg/crd/$crd/v1
-
-	echo "Copying zz_generated for $crd"
-	cp github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/zz_generated.deepcopy.go $SCRIPT_ROOT/pkg/crd/$crd/v1
-
 done
 
-rm -rf "${SCRIPT_ROOT}/github.com/"
-
 echo "Generating CRDs"
 mkdir -p _output/crds
 controller-gen crd:crdVersions="v1" paths=./pkg/crd/... output:crd:dir=_output/crds
@@ -113,7 +104,9 @@ echo "Copying adminpolicybasedexternalroutes CRD"
 cp _output/crds/k8s.ovn.org_adminpolicybasedexternalroutes.yaml ../dist/templates/k8s.ovn.org_adminpolicybasedexternalroutes.yaml.j2
 echo "Copying egressService CRD"
 cp _output/crds/k8s.ovn.org_egressservices.yaml ../dist/templates/k8s.ovn.org_egressservices.yaml.j2
-echo "Copying IPAMClaim CRD"
-curl -sSL https://raw.githubusercontent.com/k8snetworkplumbingwg/ipamclaims/v0.4.0-alpha/artifacts/k8s.cni.cncf.io_ipamclaims.yaml -o ../dist/templates/k8s.cni.cncf.io_ipamclaims.yaml
 echo "Copying userdefinednetworks CRD"
 cp _output/crds/k8s.ovn.org_userdefinednetworks.yaml ../dist/templates/k8s.ovn.org_userdefinednetworks.yaml.j2
+echo "Copying clusteruserdefinednetworks CRD"
+cp _output/crds/k8s.ovn.org_clusteruserdefinednetworks.yaml ../dist/templates/k8s.ovn.org_clusteruserdefinednetworks.yaml.j2
+echo "Copying routeAdvertisements CRD"
+cp _output/crds/k8s.ovn.org_routeadvertisements.yaml ../dist/templates/k8s.ovn.org_routeadvertisements.yaml.j2
diff --git a/go-controller/hack/update-modelgen.sh b/go-controller/hack/update-modelgen.sh
index 00f0156c48..18c6f1a3cd 100755
--- a/go-controller/hack/update-modelgen.sh
+++ b/go-controller/hack/update-modelgen.sh
@@ -9,7 +9,7 @@ if ! ( command -v modelgen > /dev/null ); then
 	builddir="$(mktemp -d)"
 	cd "${builddir}"
 	# ensure the hash value is not outdated, if wrong bindings are being generated re-install modelgen
-	GO111MODULE=on go install github.com/ovn-org/libovsdb/cmd/modelgen@03f787b1a8922c112936f4f4d1d75db04967d1be
+	GO111MODULE=on go install github.com/ovn-org/libovsdb/cmd/modelgen@v0.7.0
 	cd "${olddir}"
 	if [[ "${builddir}" == /tmp/* ]]; then #paranoia
 		rm -rf "${builddir}"
@@ -18,3 +18,4 @@ fi
 
 go generate ./pkg/nbdb
 go generate ./pkg/sbdb
+go generate ./pkg/vswitchd
diff --git a/go-controller/hybrid-overlay/pkg/controller/controller_suite_test.go b/go-controller/hybrid-overlay/pkg/controller/controller_suite_test.go
index c44eead38d..fd23b8de57 100644
--- a/go-controller/hybrid-overlay/pkg/controller/controller_suite_test.go
+++ b/go-controller/hybrid-overlay/pkg/controller/controller_suite_test.go
@@ -3,7 +3,7 @@ package controller
 import (
 	"testing"
 
-	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 )
diff --git a/go-controller/hybrid-overlay/pkg/controller/ho_node_linux_test.go b/go-controller/hybrid-overlay/pkg/controller/ho_node_linux_test.go
index 692683a11b..2be921c22c 100644
--- a/go-controller/hybrid-overlay/pkg/controller/ho_node_linux_test.go
+++ b/go-controller/hybrid-overlay/pkg/controller/ho_node_linux_test.go
@@ -20,7 +20,7 @@ import (
 
 	"github.com/vishvananda/netlink"
 
-	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 )
diff --git a/go-controller/hybrid-overlay/pkg/controller/ovn_node_linux_test.go b/go-controller/hybrid-overlay/pkg/controller/ovn_node_linux_test.go
index 5220d4f44a..a40e1ed76e 100644
--- a/go-controller/hybrid-overlay/pkg/controller/ovn_node_linux_test.go
+++ b/go-controller/hybrid-overlay/pkg/controller/ovn_node_linux_test.go
@@ -27,7 +27,7 @@ import (
 	iputils "github.com/containernetworking/plugins/pkg/ip"
 	"github.com/vishvananda/netlink"
 
-	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 )
diff --git a/go-controller/observability-lib/model/network_event.go b/go-controller/observability-lib/model/network_event.go
new file mode 100644
index 0000000000..00fb19fcfd
--- /dev/null
+++ b/go-controller/observability-lib/model/network_event.go
@@ -0,0 +1,61 @@
+package model
+
+import (
+	"fmt"
+
+	libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+type NetworkEvent interface {
+	String() string
+}
+
+type ACLEvent struct {
+	NetworkEvent
+	Action    string
+	Actor     string
+	Name      string
+	Namespace string
+	Direction string
+}
+
+func (e *ACLEvent) String() string {
+	var action string
+	switch e.Action {
+	case nbdb.ACLActionAllow, nbdb.ACLActionAllowRelated, nbdb.ACLActionAllowStateless:
+		action = "Allowed"
+	case nbdb.ACLActionDrop:
+		action = "Dropped"
+	case nbdb.ACLActionPass:
+		action = "Delegated to network policy"
+	default:
+		action = "Action " + e.Action
+	}
+	var msg string
+	switch e.Actor {
+	case libovsdbops.AdminNetworkPolicyOwnerType:
+		msg = fmt.Sprintf("admin network policy %s, direction %s", e.Name, e.Direction)
+	case libovsdbops.BaselineAdminNetworkPolicyOwnerType:
+		msg = fmt.Sprintf("baseline admin network policy %s, direction %s", e.Name, e.Direction)
+	case libovsdbops.MulticastNamespaceOwnerType:
+		msg = fmt.Sprintf("multicast in namespace %s, direction %s", e.Namespace, e.Direction)
+	case libovsdbops.MulticastClusterOwnerType:
+		msg = fmt.Sprintf("cluster multicast policy, direction %s", e.Direction)
+	case libovsdbops.NetpolNodeOwnerType:
+		msg = fmt.Sprintf("default allow from local node policy, direction %s", e.Direction)
+	case libovsdbops.NetworkPolicyOwnerType:
+		if e.Namespace != "" {
+			msg = fmt.Sprintf("network policy %s in namespace %s, direction %s", e.Name, e.Namespace, e.Direction)
+		} else {
+			msg = fmt.Sprintf("network policy %s, direction %s", e.Name, e.Direction)
+		}
+	case libovsdbops.NetpolNamespaceOwnerType:
+		msg = fmt.Sprintf("network policies isolation in namespace %s, direction %s", e.Namespace, e.Direction)
+	case libovsdbops.EgressFirewallOwnerType:
+		msg = fmt.Sprintf("egress firewall in namespace %s", e.Namespace)
+	case libovsdbops.UDNIsolationOwnerType:
+		msg = fmt.Sprintf("UDN isolation of type %s", e.Name)
+	}
+	return fmt.Sprintf("%s by %s", action, msg)
+}
diff --git a/go-controller/observability-lib/ovsdb/.gitignore b/go-controller/observability-lib/ovsdb/.gitignore
new file mode 100644
index 0000000000..734ba1effc
--- /dev/null
+++ b/go-controller/observability-lib/ovsdb/.gitignore
@@ -0,0 +1 @@
+*.ovsschema
diff --git a/go-controller/observability-lib/ovsdb/bridge.go b/go-controller/observability-lib/ovsdb/bridge.go
new file mode 100644
index 0000000000..d0135c4886
--- /dev/null
+++ b/go-controller/observability-lib/ovsdb/bridge.go
@@ -0,0 +1,570 @@
+// Code generated by "libovsdb.modelgen"
+// DO NOT EDIT.
+
+package ovsdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const BridgeTable = "Bridge"
+
+type (
+	BridgeFailMode  = string
+	BridgeProtocols = string
+)
+
+var (
+	BridgeFailModeStandalone  BridgeFailMode  = "standalone"
+	BridgeFailModeSecure      BridgeFailMode  = "secure"
+	BridgeProtocolsOpenflow10 BridgeProtocols = "OpenFlow10"
+	BridgeProtocolsOpenflow11 BridgeProtocols = "OpenFlow11"
+	BridgeProtocolsOpenflow12 BridgeProtocols = "OpenFlow12"
+	BridgeProtocolsOpenflow13 BridgeProtocols = "OpenFlow13"
+	BridgeProtocolsOpenflow14 BridgeProtocols = "OpenFlow14"
+	BridgeProtocolsOpenflow15 BridgeProtocols = "OpenFlow15"
+)
+
+// Bridge defines an object in Bridge table
+type Bridge struct {
+	UUID                string            `ovsdb:"_uuid"`
+	AutoAttach          *string           `ovsdb:"auto_attach"`
+	Controller          []string          `ovsdb:"controller"`
+	DatapathID          *string           `ovsdb:"datapath_id"`
+	DatapathType        string            `ovsdb:"datapath_type"`
+	DatapathVersion     string            `ovsdb:"datapath_version"`
+	ExternalIDs         map[string]string `ovsdb:"external_ids"`
+	FailMode            *BridgeFailMode   `ovsdb:"fail_mode"`
+	FloodVLANs          []int             `ovsdb:"flood_vlans"`
+	FlowTables          map[int]string    `ovsdb:"flow_tables"`
+	IPFIX               *string           `ovsdb:"ipfix"`
+	McastSnoopingEnable bool              `ovsdb:"mcast_snooping_enable"`
+	Mirrors             []string          `ovsdb:"mirrors"`
+	Name                string            `ovsdb:"name"`
+	Netflow             *string           `ovsdb:"netflow"`
+	OtherConfig         map[string]string `ovsdb:"other_config"`
+	Ports               []string          `ovsdb:"ports"`
+	Protocols           []BridgeProtocols `ovsdb:"protocols"`
+	RSTPEnable          bool              `ovsdb:"rstp_enable"`
+	RSTPStatus          map[string]string `ovsdb:"rstp_status"`
+	Sflow               *string           `ovsdb:"sflow"`
+	Status              map[string]string `ovsdb:"status"`
+	STPEnable           bool              `ovsdb:"stp_enable"`
+}
+
+func (a *Bridge) GetUUID() string {
+	return a.UUID
+}
+
+func (a *Bridge) GetAutoAttach() *string {
+	return a.AutoAttach
+}
+
+func copyBridgeAutoAttach(a *string) *string {
+	if a == nil {
+		return nil
+	}
+	b := *a
+	return &b
+}
+
+func equalBridgeAutoAttach(a, b *string) bool {
+	if (a == nil) != (b == nil) {
+		return false
+	}
+	if a == b {
+		return true
+	}
+	return *a == *b
+}
+
+func (a *Bridge) GetController() []string {
+	return a.Controller
+}
+
+func copyBridgeController(a []string) []string {
+	if a == nil {
+		return nil
+	}
+	b := make([]string, len(a))
+	copy(b, a)
+	return b
+}
+
+func equalBridgeController(a, b []string) bool {
+	if (a == nil) != (b == nil) {
+		return false
+	}
+	if len(a) != len(b) {
+		return false
+	}
+	for i, v := range a {
+		if b[i] != v {
+			return false
+		}
+	}
+	return true
+}
+
+func (a *Bridge) GetDatapathID() *string {
+	return a.DatapathID
+}
+
+func copyBridgeDatapathID(a *string) *string {
+	if a == nil {
+		return nil
+	}
+	b := *a
+	return &b
+}
+
+func equalBridgeDatapathID(a, b *string) bool {
+	if (a == nil) != (b == nil) {
+		return false
+	}
+	if a == b {
+		return true
+	}
+	return *a == *b
+}
+
+func (a *Bridge) GetDatapathType() string {
+	return a.DatapathType
+}
+
+func (a *Bridge) GetDatapathVersion() string {
+	return a.DatapathVersion
+}
+
+func (a *Bridge) GetExternalIDs() map[string]string {
+	return a.ExternalIDs
+}
+
+func copyBridgeExternalIDs(a map[string]string) map[string]string {
+	if a == nil {
+		return nil
+	}
+	b := make(map[string]string, len(a))
+	for k, v := range a {
+		b[k] = v
+	}
+	return b
+}
+
+func equalBridgeExternalIDs(a, b map[string]string) bool {
+	if (a == nil) != (b == nil) {
+		return false
+	}
+	if len(a) != len(b) {
+		return false
+	}
+	for k, v := range a {
+		if w, ok := b[k]; !ok || v != w {
+			return false
+		}
+	}
+	return true
+}
+
+func (a *Bridge) GetFailMode() *BridgeFailMode {
+	return a.FailMode
+}
+
+func copyBridgeFailMode(a *BridgeFailMode) *BridgeFailMode {
+	if a == nil {
+		return nil
+	}
+	b := *a
+	return &b
+}
+
+func equalBridgeFailMode(a, b *BridgeFailMode) bool {
+	if (a == nil) != (b == nil) {
+		return false
+	}
+	if a == b {
+		return true
+	}
+	return *a == *b
+}
+
+func (a *Bridge) GetFloodVLANs() []int {
+	return a.FloodVLANs
+}
+
+func copyBridgeFloodVLANs(a []int) []int {
+	if a == nil {
+		return nil
+	}
+	b := make([]int, len(a))
+	copy(b, a)
+	return b
+}
+
+func equalBridgeFloodVLANs(a, b []int) bool {
+	if (a == nil) != (b == nil) {
+		return false
+	}
+	if len(a) != len(b) {
+		return false
+	}
+	for i, v := range a {
+		if b[i] != v {
+			return false
+		}
+	}
+	return true
+}
+
+func (a *Bridge) GetFlowTables() map[int]string {
+	return a.FlowTables
+}
+
+func copyBridgeFlowTables(a map[int]string) map[int]string {
+	if a == nil {
+		return nil
+	}
+	b := make(map[int]string, len(a))
+	for k, v := range a {
+		b[k] = v
+	}
+	return b
+}
+
+func equalBridgeFlowTables(a, b map[int]string) bool {
+	if (a == nil) != (b == nil) {
+		return false
+	}
+	if len(a) != len(b) {
+		return false
+	}
+	for k, v := range a {
+		if w, ok := b[k]; !ok || v != w {
+			return false
+		}
+	}
+	return true
+}
+
+func (a *Bridge) GetIPFIX() *string {
+	return a.IPFIX
+}
+
+func copyBridgeIPFIX(a *string) *string {
+	if a == nil {
+		return nil
+	}
+	b := *a
+	return &b
+}
+
+func equalBridgeIPFIX(a, b *string) bool {
+	if (a == nil) != (b == nil) {
+		return false
+	}
+	if a == b {
+		return true
+	}
+	return *a == *b
+}
+
+func (a *Bridge) GetMcastSnoopingEnable() bool {
+	return a.McastSnoopingEnable
+}
+
+func (a *Bridge) GetMirrors() []string {
+	return a.Mirrors
+}
+
+func copyBridgeMirrors(a []string) []string {
+	if a == nil {
+		return nil
+	}
+	b := make([]string, len(a))
+	copy(b, a)
+	return b
+}
+
+func equalBridgeMirrors(a, b []string) bool {
+	if (a == nil) != (b == nil) {
+		return false
+	}
+	if len(a) != len(b) {
+		return false
+	}
+	for i, v := range a {
+		if b[i] != v {
+			return false
+		}
+	}
+	return true
+}
+
+func (a *Bridge) GetName() string {
+	return a.Name
+}
+
+func (a *Bridge) GetNetflow() *string {
+	return a.Netflow
+}
+
+func copyBridgeNetflow(a *string) *string {
+	if a == nil {
+		return nil
+	}
+	b := *a
+	return &b
+}
+
+func equalBridgeNetflow(a, b *string) bool {
+	if (a == nil) != (b == nil) {
+		return false
+	}
+	if a == b {
+		return true
+	}
+	return *a == *b
+}
+
+func (a *Bridge) GetOtherConfig() map[string]string {
+	return a.OtherConfig
+}
+
+func copyBridgeOtherConfig(a map[string]string) map[string]string {
+	if a == nil {
+		return nil
+	}
+	b := make(map[string]string, len(a))
+	for k, v := range a {
+		b[k] = v
+	}
+	return b
+}
+
+func equalBridgeOtherConfig(a, b map[string]string) bool {
+	if (a == nil) != (b == nil) {
+		return false
+	}
+	if len(a) != len(b) {
+		return false
+	}
+	for k, v := range a {
+		if w, ok := b[k]; !ok || v != w {
+			return false
+		}
+	}
+	return true
+}
+
+func (a *Bridge) GetPorts() []string {
+	return a.Ports
+}
+
+func copyBridgePorts(a []string) []string {
+	if a == nil {
+		return nil
+	}
+	b := make([]string, len(a))
+	copy(b, a)
+	return b
+}
+
+func equalBridgePorts(a, b []string) bool {
+	if (a == nil) != (b == nil) {
+		return false
+	}
+	if len(a) != len(b) {
+		return false
+	}
+	for i, v := range a {
+		if b[i] != v {
+			return false
+		}
+	}
+	return true
+}
+
+func (a *Bridge) GetProtocols() []BridgeProtocols {
+	return a.Protocols
+}
+
+func copyBridgeProtocols(a []BridgeProtocols) []BridgeProtocols {
+	if a == nil {
+		return nil
+	}
+	b := make([]BridgeProtocols, len(a))
+	copy(b, a)
+	return b
+}
+
+func equalBridgeProtocols(a, b []BridgeProtocols) bool {
+	if (a == nil) != (b == nil) {
+		return false
+	}
+	if len(a) != len(b) {
+		return false
+	}
+	for i, v := range a {
+		if b[i] != v {
+			return false
+		}
+	}
+	return true
+}
+
+func (a *Bridge) GetRSTPEnable() bool {
+	return a.RSTPEnable
+}
+
+func (a *Bridge) GetRSTPStatus() map[string]string {
+	return a.RSTPStatus
+}
+
+func copyBridgeRSTPStatus(a map[string]string) map[string]string {
+	if a == nil {
+		return nil
+	}
+	b := make(map[string]string, len(a))
+	for k, v := range a {
+		b[k] = v
+	}
+	return b
+}
+
+func equalBridgeRSTPStatus(a, b map[string]string) bool {
+	if (a == nil) != (b == nil) {
+		return false
+	}
+	if len(a) != len(b) {
+		return false
+	}
+	for k, v := range a {
+		if w, ok := b[k]; !ok || v != w {
+			return false
+		}
+	}
+	return true
+}
+
+func (a *Bridge) GetSflow() *string {
+	return a.Sflow
+}
+
+func copyBridgeSflow(a *string) *string {
+	if a == nil {
+		return nil
+	}
+	b := *a
+	return &b
+}
+
+func equalBridgeSflow(a, b *string) bool {
+	if (a == nil) != (b == nil) {
+		return false
+	}
+	if a == b {
+		return true
+	}
+	return *a == *b
+}
+
+func (a *Bridge) GetStatus() map[string]string {
+	return a.Status
+}
+
+func copyBridgeStatus(a map[string]string) map[string]string {
+	if a == nil {
+		return nil
+	}
+	b := make(map[string]string, len(a))
+	for k, v := range a {
+		b[k] = v
+	}
+	return b
+}
+
+func equalBridgeStatus(a, b map[string]string) bool {
+	if (a == nil) != (b == nil) {
+		return false
+	}
+	if len(a) != len(b) {
+		return false
+	}
+	for k, v := range a {
+		if w, ok := b[k]; !ok || v != w {
+			return false
+		}
+	}
+	return true
+}
+
+func (a *Bridge) GetSTPEnable() bool {
+	return a.STPEnable
+}
+
+func (a *Bridge) DeepCopyInto(b *Bridge) {
+	*b = *a
+	b.AutoAttach = copyBridgeAutoAttach(a.AutoAttach)
+	b.Controller = copyBridgeController(a.Controller)
+	b.DatapathID = copyBridgeDatapathID(a.DatapathID)
+	b.ExternalIDs = copyBridgeExternalIDs(a.ExternalIDs)
+	b.FailMode = copyBridgeFailMode(a.FailMode)
+	b.FloodVLANs = copyBridgeFloodVLANs(a.FloodVLANs)
+	b.FlowTables = copyBridgeFlowTables(a.FlowTables)
+	b.IPFIX = copyBridgeIPFIX(a.IPFIX)
+	b.Mirrors = copyBridgeMirrors(a.Mirrors)
+	b.Netflow = copyBridgeNetflow(a.Netflow)
+	b.OtherConfig = copyBridgeOtherConfig(a.OtherConfig)
+	b.Ports = copyBridgePorts(a.Ports)
+	b.Protocols = copyBridgeProtocols(a.Protocols)
+	b.RSTPStatus = copyBridgeRSTPStatus(a.RSTPStatus)
+	b.Sflow = copyBridgeSflow(a.Sflow)
+	b.Status = copyBridgeStatus(a.Status)
+}
+
+func (a *Bridge) DeepCopy() *Bridge {
+	b := new(Bridge)
+	a.DeepCopyInto(b)
+	return b
+}
+
+func (a *Bridge) CloneModelInto(b model.Model) {
+	c := b.(*Bridge)
+	a.DeepCopyInto(c)
+}
+
+func (a *Bridge) CloneModel() model.Model {
+	return a.DeepCopy()
+}
+
+func (a *Bridge) Equals(b *Bridge) bool {
+	return a.UUID == b.UUID &&
+		equalBridgeAutoAttach(a.AutoAttach, b.AutoAttach) &&
+		equalBridgeController(a.Controller,
b.Controller) && + equalBridgeDatapathID(a.DatapathID, b.DatapathID) && + a.DatapathType == b.DatapathType && + a.DatapathVersion == b.DatapathVersion && + equalBridgeExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalBridgeFailMode(a.FailMode, b.FailMode) && + equalBridgeFloodVLANs(a.FloodVLANs, b.FloodVLANs) && + equalBridgeFlowTables(a.FlowTables, b.FlowTables) && + equalBridgeIPFIX(a.IPFIX, b.IPFIX) && + a.McastSnoopingEnable == b.McastSnoopingEnable && + equalBridgeMirrors(a.Mirrors, b.Mirrors) && + a.Name == b.Name && + equalBridgeNetflow(a.Netflow, b.Netflow) && + equalBridgeOtherConfig(a.OtherConfig, b.OtherConfig) && + equalBridgePorts(a.Ports, b.Ports) && + equalBridgeProtocols(a.Protocols, b.Protocols) && + a.RSTPEnable == b.RSTPEnable && + equalBridgeRSTPStatus(a.RSTPStatus, b.RSTPStatus) && + equalBridgeSflow(a.Sflow, b.Sflow) && + equalBridgeStatus(a.Status, b.Status) && + a.STPEnable == b.STPEnable +} + +func (a *Bridge) EqualsModel(b model.Model) bool { + c := b.(*Bridge) + return a.Equals(c) +} + +var _ model.CloneableModel = &Bridge{} +var _ model.ComparableModel = &Bridge{} diff --git a/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go b/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go new file mode 100644 index 0000000000..57a26e805d --- /dev/null +++ b/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go @@ -0,0 +1,143 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package ovsdb + +import "github.com/ovn-org/libovsdb/model" + +const FlowSampleCollectorSetTable = "Flow_Sample_Collector_Set" + +// FlowSampleCollectorSet defines an object in Flow_Sample_Collector_Set table +type FlowSampleCollectorSet struct { + UUID string `ovsdb:"_uuid"` + Bridge string `ovsdb:"bridge"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + ID int `ovsdb:"id"` + IPFIX *string `ovsdb:"ipfix"` + LocalGroupID *int `ovsdb:"local_group_id"` +} + +func (a *FlowSampleCollectorSet) GetUUID() string { + return a.UUID +} + +func (a *FlowSampleCollectorSet) GetBridge() string { + return a.Bridge +} + +func (a *FlowSampleCollectorSet) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyFlowSampleCollectorSetExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalFlowSampleCollectorSetExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *FlowSampleCollectorSet) GetID() int { + return a.ID +} + +func (a *FlowSampleCollectorSet) GetIPFIX() *string { + return a.IPFIX +} + +func copyFlowSampleCollectorSetIPFIX(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalFlowSampleCollectorSetIPFIX(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *FlowSampleCollectorSet) GetLocalGroupID() *int { + return a.LocalGroupID +} + +func copyFlowSampleCollectorSetLocalGroupID(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalFlowSampleCollectorSetLocalGroupID(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *FlowSampleCollectorSet) DeepCopyInto(b 
*FlowSampleCollectorSet) { + *b = *a + b.ExternalIDs = copyFlowSampleCollectorSetExternalIDs(a.ExternalIDs) + b.IPFIX = copyFlowSampleCollectorSetIPFIX(a.IPFIX) + b.LocalGroupID = copyFlowSampleCollectorSetLocalGroupID(a.LocalGroupID) +} + +func (a *FlowSampleCollectorSet) DeepCopy() *FlowSampleCollectorSet { + b := new(FlowSampleCollectorSet) + a.DeepCopyInto(b) + return b +} + +func (a *FlowSampleCollectorSet) CloneModelInto(b model.Model) { + c := b.(*FlowSampleCollectorSet) + a.DeepCopyInto(c) +} + +func (a *FlowSampleCollectorSet) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *FlowSampleCollectorSet) Equals(b *FlowSampleCollectorSet) bool { + return a.UUID == b.UUID && + a.Bridge == b.Bridge && + equalFlowSampleCollectorSetExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.ID == b.ID && + equalFlowSampleCollectorSetIPFIX(a.IPFIX, b.IPFIX) && + equalFlowSampleCollectorSetLocalGroupID(a.LocalGroupID, b.LocalGroupID) +} + +func (a *FlowSampleCollectorSet) EqualsModel(b model.Model) bool { + c := b.(*FlowSampleCollectorSet) + return a.Equals(c) +} + +var _ model.CloneableModel = &FlowSampleCollectorSet{} +var _ model.ComparableModel = &FlowSampleCollectorSet{} diff --git a/go-controller/observability-lib/ovsdb/gen.go b/go-controller/observability-lib/ovsdb/gen.go new file mode 100644 index 0000000000..c5aabca468 --- /dev/null +++ b/go-controller/observability-lib/ovsdb/gen.go @@ -0,0 +1,3 @@ +package ovsdb + +//go:generate modelgen --extended -p ovsdb -o . vswitch.ovsschema diff --git a/go-controller/observability-lib/ovsdb/observ_model.go b/go-controller/observability-lib/ovsdb/observ_model.go new file mode 100644 index 0000000000..7ba2329e34 --- /dev/null +++ b/go-controller/observability-lib/ovsdb/observ_model.go @@ -0,0 +1,11 @@ +package ovsdb + +import "github.com/ovn-org/libovsdb/model" + +// ObservDatabaseModel returns the DatabaseModel object to be used by observability library. 
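The generated models above implement libovsdb's CloneableModel and ComparableModel interfaces, so callers can snapshot cache rows and compare them without sharing map or slice storage. A minimal, self-contained sketch of those semantics (field values are illustrative):

package main

import (
	"fmt"

	"github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb"
)

func main() {
	a := &ovsdb.Bridge{
		Name:        "br-int",
		ExternalIDs: map[string]string{"owner": "ovn-kubernetes"},
	}
	// DeepCopy duplicates the backing map, so mutating the copy leaves
	// the original untouched.
	b := a.DeepCopy()
	b.ExternalIDs["owner"] = "someone-else"

	fmt.Println(a.Equals(b))            // false: external_ids differ
	fmt.Println(a.Equals(a.DeepCopy())) // true: field-by-field comparison
}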
+func ObservDatabaseModel() (model.ClientDBModel, error) { + return model.NewClientDBModel("Open_vSwitch", map[string]model.Model{ + "Bridge": &Bridge{}, + "Flow_Sample_Collector_Set": &FlowSampleCollectorSet{}, + }) +} diff --git a/go-controller/observability-lib/parse_sample.go b/go-controller/observability-lib/parse_sample.go new file mode 100644 index 0000000000..d68c5e7020 --- /dev/null +++ b/go-controller/observability-lib/parse_sample.go @@ -0,0 +1,244 @@ +package observability_lib + +import ( + "bufio" + "bytes" + "context" + "encoding/binary" + "fmt" + "io" + "log" + "os" + "strings" + "syscall" + "unsafe" + + "golang.org/x/sys/unix" + + "github.com/google/gopacket" + "github.com/google/gopacket/layers" + "github.com/vishvananda/netlink" + "github.com/vishvananda/netlink/nl" + + "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder" +) + +const ( + PSAMPLE_GENL_NAME = "psample" + PSAMPLE_NL_MCGRP_SAMPLE_NAME = "packets" +) + +const ( + PSAMPLE_ATTR_IIFINDEX = iota + PSAMPLE_ATTR_OIFINDEX + PSAMPLE_ATTR_ORIGSIZE + PSAMPLE_ATTR_SAMPLE_GROUP + PSAMPLE_ATTR_GROUP_SEQ + PSAMPLE_ATTR_SAMPLE_RATE + PSAMPLE_ATTR_DATA + PSAMPLE_ATTR_GROUP_REFCOUNT + PSAMPLE_ATTR_TUNNEL + PSAMPLE_ATTR_PAD + PSAMPLE_ATTR_OUT_TC /* u16 */ + PSAMPLE_ATTR_OUT_TC_OCC /* u64, bytes */ + PSAMPLE_ATTR_LATENCY /* u64, nanoseconds */ + PSAMPLE_ATTR_TIMESTAMP /* u64, nanoseconds */ + PSAMPLE_ATTR_PROTO /* u16 */ + PSAMPLE_ATTR_USER_COOKIE + __PSAMPLE_ATTR_MAX +) + +type SampleReader struct { + enableDecoder bool + logCookie bool + printFullPacket bool + addOVSCollector bool + srcIP, dstIP string + outputFile string + + decoder *sampledecoder.SampleDecoder + cookieStr []string +} + +func NewSampleReader(enableDecoder, logCookie, printFullPacket, addOVSCollector bool, srcIP, dstIP, outputFile string) *SampleReader { + r := &SampleReader{ + enableDecoder: enableDecoder, + logCookie: logCookie, + printFullPacket: printFullPacket, + addOVSCollector: addOVSCollector, + srcIP: srcIP, + dstIP: dstIP, + outputFile: outputFile, + } + if logCookie { + r.cookieStr = make([]string, 2) + } + return r +} + +func (r *SampleReader) ReadSamples(ctx context.Context) error { + if r.enableDecoder { + var err error + // currently only local nbdb connection is supported. + nbdbSocketPath := "/var/run/ovn/ovnnb_db.sock" + if r.addOVSCollector { + r.decoder, err = sampledecoder.NewSampleDecoderWithDefaultCollector(ctx, nbdbSocketPath, "ovnk-debug", 123) + if err != nil { + return fmt.Errorf("error creating decoder: %w", err) + } + defer r.decoder.Shutdown() + } else { + r.decoder, err = sampledecoder.NewSampleDecoder(ctx, nbdbSocketPath) + if err != nil { + return fmt.Errorf("error creating decoder: %w", err) + } + } + } + var writer io.Writer + if r.outputFile != "" { + file, err := os.Create(r.outputFile) + if err != nil { + return fmt.Errorf("error creating output file: %w", err) + } + defer file.Close() + writer = bufio.NewWriter(file) + } else { + writer = os.Stdout + } + l := log.New(writer, "", log.Ldate|log.Ltime|log.Lmicroseconds) + printlnFunc := func(a ...any) { + l.Println(a...) 
+ } + + fam, err := netlink.GenlFamilyGet(PSAMPLE_GENL_NAME) + if err != nil { + return fmt.Errorf("error getting netlink family %s: %w", PSAMPLE_GENL_NAME, err) + } + if len(fam.Groups) == 0 { + return fmt.Errorf("no mcast groups found for %s", PSAMPLE_GENL_NAME) + } + var ovsGroupID uint32 + for _, group := range fam.Groups { + if group.Name == PSAMPLE_NL_MCGRP_SAMPLE_NAME { + ovsGroupID = group.ID + } + } + if ovsGroupID == 0 { + return fmt.Errorf("no mcast group found for %s", PSAMPLE_NL_MCGRP_SAMPLE_NAME) + } else { + fmt.Printf("Found group %s, id %d\n", PSAMPLE_NL_MCGRP_SAMPLE_NAME, ovsGroupID) + } + sock, err := nl.Subscribe(nl.GENL_ID_CTRL, uint(ovsGroupID)) + if err != nil { + return fmt.Errorf("error subscribing to netlink group %d: %w", ovsGroupID, err) + } + + // Otherwise sock.Receive() will be blocking and won't return on context close + if err = unix.SetNonblock(sock.GetFd(), true); err != nil { + return fmt.Errorf("error setting non-blocking mode: %w", err) + } + + defer func() { + sock.Close() + }() + + for { + select { + case <-ctx.Done(): + return nil + default: + msgs, _, err := sock.Receive() + if err != nil { + if err == syscall.EAGAIN { + continue + } + printlnFunc("ERROR: receive failed:", err) + continue + } + if err = r.parseMsg(msgs, printlnFunc); err != nil { + printlnFunc("ERROR: ", err) + } + } + } +} + +func getHostEndian() binary.ByteOrder { + buf := [2]byte{} + *(*uint16)(unsafe.Pointer(&buf[0])) = uint16(0xABCD) + + switch buf { + case [2]byte{0xCD, 0xAB}: + return binary.LittleEndian + case [2]byte{0xAB, 0xCD}: + return binary.BigEndian + default: + panic("Could not determine native endianness.") + } +} + +var hostEndian = getHostEndian() + +func (r *SampleReader) parseMsg(msgs []syscall.NetlinkMessage, printlnFunc func(a ...any)) error { + for _, msg := range msgs { + var packetStr, sampleStr string + data := msg.Data[nl.SizeofGenlmsg:] + for attr := range nl.ParseAttributes(data) { + if r.logCookie && attr.Type == PSAMPLE_ATTR_SAMPLE_GROUP { + if uint64(len(attr.Value)) == 4 { + g := uint32(0) + // group is encoded using host endian + err := binary.Read(bytes.NewReader(attr.Value), hostEndian, &g) + if err != nil { + return err + } + r.cookieStr[0] = fmt.Sprintf("group_id=%v", g) + } + } + if attr.Type == PSAMPLE_ATTR_USER_COOKIE && (r.logCookie || r.decoder != nil) { + if uint64(len(attr.Value)) == sampledecoder.CookieSize { + c := sampledecoder.Cookie{} + err := binary.Read(bytes.NewReader(attr.Value), sampledecoder.SampleEndian, &c) + if err != nil { + return err + } + if r.logCookie { + r.cookieStr[1] = fmt.Sprintf("obs_domain=%v, obs_point=%v", + c.ObsDomainID, c.ObsPointID) + } + if r.decoder != nil { + decoded, err := r.decoder.DecodeCookieIDs(c.ObsDomainID, c.ObsPointID) + if err != nil { + sampleStr = fmt.Sprintf("decoding failed: %v", err) + } else { + sampleStr = fmt.Sprintf("OVN-K message: %s", decoded.String()) + } + } + } + } + if attr.Type == PSAMPLE_ATTR_DATA { + packet := gopacket.NewPacket(attr.Value, layers.LayerTypeEthernet, gopacket.Lazy) + networkLayer := packet.NetworkLayer().NetworkFlow() + if r.printFullPacket { + packetStr = packet.String() + } else { + packetStr = fmt.Sprintf("src=%s, dst=%v\n", + networkLayer.Src().String(), networkLayer.Dst().String()) + } + if r.srcIP != "" && r.srcIP != networkLayer.Src().String() { + return nil + } + if r.dstIP != "" && r.dstIP != networkLayer.Dst().String() { + return nil + } + } + } + if r.logCookie { + printlnFunc(strings.Join(r.cookieStr, ", ")) + } + if r.decoder != nil { + 
printlnFunc(sampleStr) + } + printlnFunc(packetStr) + } + return nil +} diff --git a/go-controller/observability-lib/sampledecoder/db_client.go b/go-controller/observability-lib/sampledecoder/db_client.go new file mode 100644 index 0000000000..5ff1587a6f --- /dev/null +++ b/go-controller/observability-lib/sampledecoder/db_client.go @@ -0,0 +1,118 @@ +package sampledecoder + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "k8s.io/klog/v2/textlogger" +) + +const OVSDBTimeout = 10 * time.Second + +func NewNBClientWithConfig(ctx context.Context, cfg dbConfig) (client.Client, error) { + dbModel, err := nbdb.FullDatabaseModel() + if err != nil { + return nil, err + } + + // define client indexes for ACLs to quickly find them by sample_new or sample_est column. + dbModel.SetIndexes(map[string][]model.ClientIndex{ + nbdb.ACLTable: { + {Columns: []model.ColumnKey{{Column: "sample_new"}}}, + {Columns: []model.ColumnKey{{Column: "sample_est"}}}, + }, + }) + + c, err := newClient(cfg, dbModel) + if err != nil { + return nil, err + } + + _, err = c.Monitor(ctx, + c.NewMonitor( + client.WithTable(&nbdb.ACL{}), + client.WithTable(&nbdb.Sample{}), + ), + ) + + if err != nil { + c.Close() + return nil, err + } + + return c, nil +} + +func NewOVSDBClientWithConfig(ctx context.Context, cfg dbConfig) (client.Client, error) { + dbModel, err := ovsdb.ObservDatabaseModel() + if err != nil { + return nil, err + } + + c, err := newClient(cfg, dbModel) + if err != nil { + return nil, err + } + + _, err = c.Monitor(ctx, + c.NewMonitor( + client.WithTable(&ovsdb.FlowSampleCollectorSet{}), + client.WithTable(&ovsdb.Bridge{}), + ), + ) + if err != nil { + c.Close() + return nil, err + } + + return c, nil +} + +// newClient creates a new client object given the provided config +// the stopCh is required to ensure the goroutine for ssl cert +// update is not leaked +func newClient(cfg dbConfig, dbModel model.ClientDBModel) (client.Client, error) { + const connectTimeout = OVSDBTimeout * 2 + const inactivityTimeout = OVSDBTimeout * 18 + // Don't log anything from the libovsdb client by default + config := textlogger.NewConfig(textlogger.Verbosity(0)) + logger := textlogger.NewLogger(config) + + options := []client.Option{ + // Reading and parsing the DB after reconnect at scale can (unsurprisingly) + // take longer than a normal ovsdb operation. Give it a bit more time, so + // we don't time out and enter a reconnect loop. In addition, it also enables + // inactivity check on the ovsdb connection. + client.WithInactivityCheck(inactivityTimeout, connectTimeout, &backoff.ZeroBackOff{}), + client.WithLeaderOnly(true), + client.WithLogger(&logger), + } + + for _, endpoint := range strings.Split(cfg.address, ",") { + options = append(options, client.WithEndpoint(endpoint)) + } + if cfg.scheme != "unix" { + return nil, fmt.Errorf("only unix scheme is supported for now") + } + + client, err := client.NewOVSDBClient(dbModel, options...) 
+ if err != nil { + return nil, err + } + + ctx, cancel := context.WithTimeout(context.Background(), connectTimeout) + defer cancel() + err = client.Connect(ctx) + if err != nil { + return nil, err + } + + return client, nil +} diff --git a/go-controller/observability-lib/sampledecoder/sample_decoder.go b/go-controller/observability-lib/sampledecoder/sample_decoder.go new file mode 100644 index 0000000000..d691fd9cca --- /dev/null +++ b/go-controller/observability-lib/sampledecoder/sample_decoder.go @@ -0,0 +1,293 @@ +package sampledecoder + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "strings" + + "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/model" + "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability" +) + +type SampleDecoder struct { + nbClient client.Client + ovsdbClient client.Client + cleanupCollectors []int +} + +type dbConfig struct { + address string + scheme string +} + +type Cookie struct { + ObsDomainID uint32 + ObsPointID uint32 +} + +const CookieSize = 8 +const bridgeName = "br-int" + +var SampleEndian = getEndian() + +func getEndian() binary.ByteOrder { + // Use network byte order + return binary.BigEndian +} + +// getLocalNBClient only supports connecting to nbdb via unix socket. +// address is the path to the unix socket, e.g. "/var/run/ovn/ovnnb_db.sock" +func getLocalNBClient(ctx context.Context, address string) (client.Client, error) { + config := dbConfig{ + address: "unix:" + address, + scheme: "unix", + } + libovsdbOvnNBClient, err := NewNBClientWithConfig(ctx, config) + if err != nil { + return nil, fmt.Errorf("error creating libovsdb client: %w", err) + } + return libovsdbOvnNBClient, nil +} + +func getLocalOVSDBClient(ctx context.Context) (client.Client, error) { + config := dbConfig{ + address: "unix:/var/run/openvswitch/db.sock", + scheme: "unix", + } + return NewOVSDBClientWithConfig(ctx, config) +} + +// NewSampleDecoderWithDefaultCollector creates a new SampleDecoder, initializes the OVSDB client and adds the default collector. +// It allows setting the groupID and ownerName for the created default collector. +// If the default collector already exists with a different owner or a different groupID, an error will be returned. +// Shutdown should be called to clean up the collector from the OVSDB. +func NewSampleDecoderWithDefaultCollector(ctx context.Context, nbdbSocketPath string, ownerName string, groupID int) (*SampleDecoder, error) { + nbClient, err := getLocalNBClient(ctx, nbdbSocketPath) + if err != nil { + return nil, err + } + ovsdbClient, err := getLocalOVSDBClient(ctx) + if err != nil { + return nil, err + } + decoder := &SampleDecoder{ + nbClient: nbClient, + ovsdbClient: ovsdbClient, + } + err = decoder.AddCollector(observability.DefaultObservabilityCollectorSetID, groupID, ownerName) + if err != nil { + return nil, err + } + decoder.cleanupCollectors = append(decoder.cleanupCollectors, observability.DefaultObservabilityCollectorSetID) + return decoder, nil +} + +// NewSampleDecoder creates a new SampleDecoder and initializes the nbdb client.
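A usage sketch for the default-collector constructor above; the owner name, group ID, and cookie IDs are illustrative values, and the socket path is the local-only default used elsewhere in this series:

package main

import (
	"context"
	"fmt"

	"github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder"
)

func main() {
	ctx := context.Background()
	// Registers the default collector in OVSDB on creation and removes it
	// again when Shutdown is called.
	dec, err := sampledecoder.NewSampleDecoderWithDefaultCollector(
		ctx, "/var/run/ovn/ovnnb_db.sock", "ovnk-debug", 123)
	if err != nil {
		panic(err)
	}
	defer dec.Shutdown()

	// Map a psample cookie back to the OVN-K construct that emitted it
	// (IDs would normally come from the sampled packet's cookie).
	event, err := dec.DecodeCookieIDs(42, 7)
	if err != nil {
		panic(err)
	}
	fmt.Println(event.String())
}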
+func NewSampleDecoder(ctx context.Context, nbdbSocketPath string) (*SampleDecoder, error) { + nbClient, err := getLocalNBClient(ctx, nbdbSocketPath) + if err != nil { + return nil, err + } + return &SampleDecoder{ + nbClient: nbClient, + }, nil +} + +func (d *SampleDecoder) Shutdown() { + for _, collectorID := range d.cleanupCollectors { + err := d.DeleteCollector(collectorID) + if err != nil { + fmt.Printf("Error deleting collector with ID=%d: %v\n", collectorID, err) + } + } +} + +func getObservAppID(obsDomainID uint32) uint8 { + return uint8(obsDomainID >> 24) +} + +// findACLBySample relies on the client index based on the sample_new and sample_est columns. +func findACLBySample(nbClient client.Client, acl *nbdb.ACL) ([]*nbdb.ACL, error) { + found := []*nbdb.ACL{} + err := nbClient.Where(acl).List(context.Background(), &found) + return found, err +} + +func (d *SampleDecoder) DecodeCookieIDs(obsDomainID, obsPointID uint32) (model.NetworkEvent, error) { + // Find sample using obsPointID + sample, err := libovsdbops.FindSample(d.nbClient, int(obsPointID)) + if err != nil || sample == nil { + return nil, fmt.Errorf("find sample failed: %w", err) + } + // find db object using observ application ID + // Since ACL is indexed both by sample_new and sample_est, when searching by one of them, + // we need to make sure the other one will not match. + // nil is a valid index value, therefore we have to use a non-existent UUID. + wrongUUID := "wrongUUID" + var dbObj interface{} + switch getObservAppID(obsDomainID) { + case observability.ACLNewTrafficSamplingID: + acls, err := findACLBySample(d.nbClient, &nbdb.ACL{SampleNew: &sample.UUID, SampleEst: &wrongUUID}) + if err != nil { + return nil, fmt.Errorf("find acl for sample failed: %w", err) + } + if len(acls) != 1 { + return nil, fmt.Errorf("expected 1 ACL, got %d", len(acls)) + } + dbObj = acls[0] + case observability.ACLEstTrafficSamplingID: + acls, err := findACLBySample(d.nbClient, &nbdb.ACL{SampleNew: &wrongUUID, SampleEst: &sample.UUID}) + if err != nil { + return nil, fmt.Errorf("find acl for sample failed: %w", err) + } + if len(acls) != 1 { + return nil, fmt.Errorf("expected 1 ACL, got %d", len(acls)) + } + dbObj = acls[0] + default: + return nil, fmt.Errorf("unknown app ID: %d", getObservAppID(obsDomainID)) + } + var event model.NetworkEvent + switch o := dbObj.(type) { + case *nbdb.ACL: + event, err = newACLEvent(o) + if err != nil { + return nil, fmt.Errorf("failed to build ACL network event: %w", err) + } + } + if event == nil { + return nil, fmt.Errorf("failed to build network event for db object %v", dbObj) + } + return event, nil +} + +func newACLEvent(o *nbdb.ACL) (*model.ACLEvent, error) { + actor := o.ExternalIDs[libovsdbops.OwnerTypeKey.String()] + event := model.ACLEvent{ + Action: o.Action, + Actor: actor, + } + switch actor { + case libovsdbops.NetworkPolicyOwnerType: + objName := o.ExternalIDs[libovsdbops.ObjectNameKey.String()] + nsname := strings.SplitN(objName, ":", 2) + if len(nsname) == 2 { + event.Namespace = nsname[0] + event.Name = nsname[1] + } else { + return nil, fmt.Errorf("expected format namespace:name for Object Name, but found: %s", objName) + } + event.Direction = o.ExternalIDs[libovsdbops.PolicyDirectionKey.String()] + case libovsdbops.AdminNetworkPolicyOwnerType, libovsdbops.BaselineAdminNetworkPolicyOwnerType: + event.Name = o.ExternalIDs[libovsdbops.ObjectNameKey.String()] + event.Direction = o.ExternalIDs[libovsdbops.PolicyDirectionKey.String()] + case libovsdbops.MulticastNamespaceOwnerType, 
libovsdbops.NetpolNamespaceOwnerType: + event.Namespace = o.ExternalIDs[libovsdbops.ObjectNameKey.String()] + event.Direction = o.ExternalIDs[libovsdbops.PolicyDirectionKey.String()] + case libovsdbops.MulticastClusterOwnerType: + event.Direction = o.ExternalIDs[libovsdbops.PolicyDirectionKey.String()] + case libovsdbops.EgressFirewallOwnerType: + event.Namespace = o.ExternalIDs[libovsdbops.ObjectNameKey.String()] + event.Direction = "Egress" + case libovsdbops.UDNIsolationOwnerType: + event.Name = o.ExternalIDs[libovsdbops.ObjectNameKey.String()] + case libovsdbops.NetpolNodeOwnerType: + event.Direction = "Ingress" + } + return &event, nil +} + +func (d *SampleDecoder) DecodeCookieBytes(cookie []byte) (model.NetworkEvent, error) { + if uint64(len(cookie)) != CookieSize { + return nil, fmt.Errorf("invalid cookie size: %d", len(cookie)) + } + c := Cookie{} + err := binary.Read(bytes.NewReader(cookie), SampleEndian, &c) + if err != nil { + return nil, err + } + return d.DecodeCookieIDs(c.ObsDomainID, c.ObsPointID) +} + +func (d *SampleDecoder) DecodeCookie8Bytes(cookie [8]byte) (model.NetworkEvent, error) { + c := Cookie{} + err := binary.Read(bytes.NewReader(cookie[:]), SampleEndian, &c) + if err != nil { + return nil, err + } + return d.DecodeCookieIDs(c.ObsDomainID, c.ObsPointID) +} + +func getGroupID(groupID *int) string { + if groupID == nil { + return "unset" + } + return fmt.Sprintf("%d", *groupID) +} + +func (d *SampleDecoder) AddCollector(collectorID, groupID int, ownerName string) error { + if d.ovsdbClient == nil { + return fmt.Errorf("OVSDB client is not initialized") + } + // find existing collector with the same ID + collectors := []*ovsdb.FlowSampleCollectorSet{} + err := d.ovsdbClient.WhereCache(func(item *ovsdb.FlowSampleCollectorSet) bool { + return item.ID == collectorID + }).List(context.Background(), &collectors) + if err != nil { + return fmt.Errorf("failed finding existing collector: %w", err) + } + if len(collectors) > 0 && (collectors[0].ExternalIDs["owner"] != ownerName || + collectors[0].LocalGroupID == nil || *collectors[0].LocalGroupID != groupID) { + return fmt.Errorf("requested collector with id=%v already exists "+ + "with the external_ids=%+v, local_group_id=%v", collectorID, collectors[0].ExternalIDs["owner"], getGroupID(collectors[0].LocalGroupID)) + } + + // find br-int UUID to attach collector + bridges := []*ovsdb.Bridge{} + err = d.ovsdbClient.WhereCache(func(item *ovsdb.Bridge) bool { + return item.Name == bridgeName + }).List(context.Background(), &bridges) + if err != nil || len(bridges) != 1 { + return fmt.Errorf("failed finding br-int: %w", err) + } + + ops, err := d.ovsdbClient.Create(&ovsdb.FlowSampleCollectorSet{ + ID: collectorID, + Bridge: bridges[0].UUID, + LocalGroupID: &groupID, + ExternalIDs: map[string]string{"owner": ownerName}, + }) + if err != nil { + return fmt.Errorf("failed creating collector: %w", err) + } + _, err = d.ovsdbClient.Transact(context.Background(), ops...) 
+ return err +} + +func (d *SampleDecoder) DeleteCollector(collectorID int) error { + collectors := []*ovsdb.FlowSampleCollectorSet{} + err := d.ovsdbClient.WhereCache(func(item *ovsdb.FlowSampleCollectorSet) bool { + return item.ID == collectorID + }).List(context.Background(), &collectors) + if err != nil { + return fmt.Errorf("failed finding existing collector: %w", err) + } + if len(collectors) != 1 { + return fmt.Errorf("expected only 1 collector with given id") + } + + ops, err := d.ovsdbClient.Where(collectors[0]).Delete() + if err != nil { + return fmt.Errorf("failed deleting collector: %w", err) + } + res, err := d.ovsdbClient.Transact(context.Background(), ops...) + fmt.Println("res: ", res) + return err +} diff --git a/go-controller/observability-lib/sampledecoder/sample_decoder_test.go b/go-controller/observability-lib/sampledecoder/sample_decoder_test.go new file mode 100644 index 0000000000..9af4eedd83 --- /dev/null +++ b/go-controller/observability-lib/sampledecoder/sample_decoder_test.go @@ -0,0 +1,66 @@ +package sampledecoder + +import ( + "testing" + + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "github.com/stretchr/testify/assert" +) + +func TestNewACLEvent(t *testing.T) { + event, err := newACLEvent(&nbdb.ACL{ + Action: nbdb.ACLActionAllow, + ExternalIDs: map[string]string{ + libovsdbops.OwnerTypeKey.String(): libovsdbops.NetworkPolicyOwnerType, + libovsdbops.ObjectNameKey.String(): "foo", + libovsdbops.PolicyDirectionKey.String(): string(libovsdbutil.ACLIngress), + }, + }) + assert.ErrorContains(t, err, "expected format namespace:name for Object Name, but found: foo") + assert.Nil(t, event) + + event, err = newACLEvent(&nbdb.ACL{ + Action: nbdb.ACLActionAllow, + ExternalIDs: map[string]string{ + libovsdbops.OwnerTypeKey.String(): libovsdbops.NetworkPolicyOwnerType, + libovsdbops.ObjectNameKey.String(): "bar:foo", + libovsdbops.PolicyDirectionKey.String(): string(libovsdbutil.ACLIngress), + }, + }) + assert.NoError(t, err) + assert.Equal(t, "Allowed by network policy foo in namespace bar, direction Ingress", event.String()) + + event, err = newACLEvent(&nbdb.ACL{ + Action: nbdb.ACLActionAllow, + ExternalIDs: map[string]string{ + libovsdbops.OwnerTypeKey.String(): libovsdbops.AdminNetworkPolicyOwnerType, + libovsdbops.ObjectNameKey.String(): "foo", + libovsdbops.PolicyDirectionKey.String(): string(libovsdbutil.ACLIngress), + }, + }) + assert.NoError(t, err) + assert.Equal(t, "Allowed by admin network policy foo, direction Ingress", event.String()) + + event, err = newACLEvent(&nbdb.ACL{ + Action: nbdb.ACLActionAllow, + ExternalIDs: map[string]string{ + libovsdbops.OwnerTypeKey.String(): libovsdbops.EgressFirewallOwnerType, + libovsdbops.ObjectNameKey.String(): "foo", + }, + }) + assert.NoError(t, err) + assert.Equal(t, "Allowed by egress firewall in namespace foo", event.String()) + assert.Equal(t, "Egress", event.Direction) + + event, err = newACLEvent(&nbdb.ACL{ + Action: nbdb.ACLActionAllow, + ExternalIDs: map[string]string{ + libovsdbops.OwnerTypeKey.String(): libovsdbops.NetpolNodeOwnerType, + }, + }) + assert.NoError(t, err) + assert.Equal(t, "Allowed by default allow from local node policy, direction Ingress", event.String()) + assert.Equal(t, "Ingress", event.Direction) +} diff --git a/go-controller/pkg/allocator/id/allocator.go b/go-controller/pkg/allocator/id/allocator.go 
index c21729d865..a2a08a3b3f 100644 --- a/go-controller/pkg/allocator/id/allocator.go +++ b/go-controller/pkg/allocator/id/allocator.go @@ -33,13 +33,13 @@ type idAllocator struct { } // NewIDAllocator returns an IDAllocator -func NewIDAllocator(name string, maxIds int) (Allocator, error) { +func NewIDAllocator(name string, maxIds int) Allocator { idBitmap := bitmapallocator.NewRoundRobinAllocationMap(maxIds, name) return &idAllocator{ nameIdMap: sync.Map{}, idBitmap: idBitmap, - }, nil + } } // AllocateID allocates an id for the resource 'name' and returns the id. diff --git a/go-controller/pkg/allocator/ip/subnet/allocator.go b/go-controller/pkg/allocator/ip/subnet/allocator.go index e7e97e8598..7db69aa967 100644 --- a/go-controller/pkg/allocator/ip/subnet/allocator.go +++ b/go-controller/pkg/allocator/ip/subnet/allocator.go @@ -21,7 +21,7 @@ type Allocator interface { DeleteSubnet(name string) GetSubnets(name string) ([]*net.IPNet, error) AllocateUntilFull(name string) error - AllocateIPs(name string, ips []*net.IPNet) error + AllocateIPPerSubnet(name string, ips []*net.IPNet) error AllocateNextIPs(name string) ([]*net.IPNet, error) ReleaseIPs(name string, ips []*net.IPNet) error ConditionalIPRelease(name string, ips []*net.IPNet, predicate func() (bool, error)) (bool, error) @@ -159,9 +159,10 @@ func (allocator *allocator) AllocateUntilFull(name string) error { return nil } -// AllocateIPs will block off IPs in the ipnets slice as already allocated -// for a given subnet set -func (allocator *allocator) AllocateIPs(name string, ips []*net.IPNet) error { +// AllocateIPPerSubnet will block off IPs in the ipnets slice as already +// allocated in each of the subnets it manages. ips *must* feature a single IP +// on each of the subnets managed by the allocator. 
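A sketch of the renamed method's contract, mirroring the new unit test below; the NewAllocator constructor name is assumed here, since the test setup is outside this hunk:

package main

import (
	"fmt"

	subnet "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/ip/subnet"
	ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing"
)

func main() {
	a := subnet.NewAllocator() // assumed constructor, not shown in this hunk
	if err := a.AddOrUpdateSubnet("subnet1", ovntest.MustParseIPNets("10.1.1.0/24", "2000::/64")); err != nil {
		panic(err)
	}

	// OK: exactly one IP on each managed subnet.
	fmt.Println(a.AllocateIPPerSubnet("subnet1",
		ovntest.MustParseIPNets("10.1.1.3/24", "2000::3/64")))

	// Error: both IPs fall into the same IPAM instance.
	fmt.Println(a.AllocateIPPerSubnet("subnet1",
		ovntest.MustParseIPNets("10.1.1.1/24", "10.1.1.2/24")))
}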
+func (allocator *allocator) AllocateIPPerSubnet(name string, ips []*net.IPNet) error { if len(ips) == 0 { return fmt.Errorf("failed to allocate IPs for %s: no IPs provided", name) } @@ -364,7 +365,7 @@ type IPAllocator struct { // AllocateIPs allocates the requested IPs func (ipAllocator *IPAllocator) AllocateIPs(ips []*net.IPNet) error { - return ipAllocator.allocator.AllocateIPs(ipAllocator.name, ips) + return ipAllocator.allocator.AllocateIPPerSubnet(ipAllocator.name, ips) } // AllocateNextIPs allocates the next available IPs diff --git a/go-controller/pkg/allocator/ip/subnet/allocator_test.go b/go-controller/pkg/allocator/ip/subnet/allocator_test.go index 4f035a2534..d759b4444b 100644 --- a/go-controller/pkg/allocator/ip/subnet/allocator_test.go +++ b/go-controller/pkg/allocator/ip/subnet/allocator_test.go @@ -5,12 +5,14 @@ import ( ipam "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/ip" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) var _ = ginkgo.Describe("Subnet IP allocator operations", func() { + const subnetName = "subnet1" var ( allocator Allocator ) @@ -21,7 +23,6 @@ var _ = ginkgo.Describe("Subnet IP allocator operations", func() { ginkgo.Context("when adding subnets", func() { ginkgo.It("creates each IPAM and reserves IPs correctly", func() { - subnetName := "subnet1" subnets := []string{ "10.1.1.0/24", "2000::/64", @@ -40,7 +41,6 @@ var _ = ginkgo.Describe("Subnet IP allocator operations", func() { }) ginkgo.It("handles updates to the subnets correctly", func() { - subnetName := "subnet1" subnets := []string{ "10.1.1.0/24", "2000::/64", @@ -69,7 +69,6 @@ var _ = ginkgo.Describe("Subnet IP allocator operations", func() { }) ginkgo.It("excludes subnets correctly", func() { - subnetName := "subnet1" subnets := []string{ "10.1.1.0/24", } @@ -93,7 +92,6 @@ var _ = ginkgo.Describe("Subnet IP allocator operations", func() { ginkgo.Context("when allocating IP addresses", func() { ginkgo.It("IPAM for each subnet allocates IPs contiguously", func() { - subnetName := "subnet1" subnets := []string{ "10.1.1.0/24", "2000::/64", @@ -116,7 +114,6 @@ var _ = ginkgo.Describe("Subnet IP allocator operations", func() { }) ginkgo.It("IPAM allocates, releases, and reallocates IPs correctly", func() { - subnetName := "subnet1" subnets := []string{ "10.1.1.0/24", } @@ -136,13 +133,24 @@ var _ = ginkgo.Describe("Subnet IP allocator operations", func() { } err = allocator.ReleaseIPs(subnetName, ips) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = allocator.AllocateIPs(subnetName, ips) + err = allocator.AllocateIPPerSubnet(subnetName, ips) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }) + ginkgo.It("fails to allocate multiple IPs from the same subnet", func() { + subnets := []string{"10.1.1.0/24", "2000::/64"} + + gomega.Expect(allocator.AddOrUpdateSubnet(subnetName, ovntest.MustParseIPNets(subnets...))).To(gomega.Succeed()) + + ips, err := util.ParseIPNets([]string{"10.1.1.1/24", "10.1.1.2/24"}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(allocator.AllocateIPPerSubnet(subnetName, ips)).To(gomega.MatchError( + "failed to allocate IP 10.1.1.2 for subnet1: attempted to reserve multiple IPs in the same IPAM instance", + )) + }) + ginkgo.It("releases IPs for other subnets when any other subnet allocation fails", func() { - subnetName := "subnet1" subnets := []string{ "10.1.1.0/24", 
"10.1.2.0/29", @@ -184,7 +192,6 @@ var _ = ginkgo.Describe("Subnet IP allocator operations", func() { }) ginkgo.It("fails correctly when trying to block a previously allocated IP", func() { - subnetName := "subnet1" subnets := []string{ "10.1.1.0/24", "2000::/64", @@ -203,7 +210,7 @@ var _ = ginkgo.Describe("Subnet IP allocator operations", func() { gomega.Expect(ip.String()).To(gomega.Equal(expectedIPs[i])) } gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = allocator.AllocateIPs(subnetName, ovntest.MustParseIPNets(expectedIPs...)) + err = allocator.AllocateIPPerSubnet(subnetName, ovntest.MustParseIPNets(expectedIPs...)) gomega.Expect(err).To(gomega.MatchError(ipam.ErrAllocated)) }) diff --git a/go-controller/pkg/clustermanager/clustermanager.go b/go-controller/pkg/clustermanager/clustermanager.go index 910a1ec308..f97659994a 100644 --- a/go-controller/pkg/clustermanager/clustermanager.go +++ b/go-controller/pkg/clustermanager/clustermanager.go @@ -114,7 +114,7 @@ func NewClusterManager(ovnClient *util.OVNClusterManagerClientset, wf *factory.W } } if util.IsNetworkSegmentationSupportEnabled() { - cm.endpointSliceMirrorController, err = endpointslicemirror.NewController(ovnClient, wf) + cm.endpointSliceMirrorController, err = endpointslicemirror.NewController(ovnClient, wf, cm.secondaryNetClusterManager.nadController) if err != nil { return nil, err } @@ -131,11 +131,17 @@ func NewClusterManager(ovnClient *util.OVNClusterManagerClientset, wf *factory.W if util.IsNetworkSegmentationSupportEnabled() { udnController := udncontroller.New( ovnClient.NetworkAttchDefClient, wf.NADInformer(), - ovnClient.UserDefinedNetworkClient, wf.UserDefinedNetworkInformer(), + ovnClient.UserDefinedNetworkClient, + wf.UserDefinedNetworkInformer(), wf.ClusterUserDefinedNetworkInformer(), udntemplate.RenderNetAttachDefManifest, wf.PodCoreInformer(), + wf.NamespaceInformer(), + cm.recorder, ) cm.userDefinedNetworkController = udnController + if cm.secondaryNetClusterManager != nil { + cm.secondaryNetClusterManager.SetNetworkStatusReporter(udnController.UpdateSubsystemCondition) + } } return cm, nil @@ -150,6 +156,13 @@ func (cm *ClusterManager) Start(ctx context.Context) error { return err } + // Start secondary CM first so that NAD controller initializes before other controllers + if config.OVNKubernetesFeature.EnableMultiNetwork { + if err := cm.secondaryNetClusterManager.Start(); err != nil { + return err + } + } + if err := cm.defaultNetClusterController.Start(ctx); err != nil { return err } @@ -158,12 +171,6 @@ func (cm *ClusterManager) Start(ctx context.Context) error { return fmt.Errorf("could not start zone controller, err: %w", err) } - if config.OVNKubernetesFeature.EnableMultiNetwork { - if err := cm.secondaryNetClusterManager.Start(); err != nil { - return err - } - } - if config.OVNKubernetesFeature.EnableEgressIP { if err := cm.eIPC.Start(); err != nil { return err diff --git a/go-controller/pkg/clustermanager/clustermanager_suite_test.go b/go-controller/pkg/clustermanager/clustermanager_suite_test.go index 5e089375e3..458fe6fac2 100644 --- a/go-controller/pkg/clustermanager/clustermanager_suite_test.go +++ b/go-controller/pkg/clustermanager/clustermanager_suite_test.go @@ -3,7 +3,7 @@ package clustermanager import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) diff --git a/go-controller/pkg/clustermanager/clustermanager_test.go b/go-controller/pkg/clustermanager/clustermanager_test.go index 4c6301b80e..b5714dfb19 100644 --- a/go-controller/pkg/clustermanager/clustermanager_test.go +++ b/go-controller/pkg/clustermanager/clustermanager_test.go @@ -11,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" hotypes "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/types" diff --git a/go-controller/pkg/clustermanager/dnsnameresolver/controller.go b/go-controller/pkg/clustermanager/dnsnameresolver/controller.go index 4da6334382..fbee9f72ab 100644 --- a/go-controller/pkg/clustermanager/dnsnameresolver/controller.go +++ b/go-controller/pkg/clustermanager/dnsnameresolver/controller.go @@ -61,7 +61,7 @@ func (c *Controller) initControllers(watchFactory *factory.WatchFactory) { efSharedIndexInformer := watchFactory.EgressFirewallInformer().Informer() c.efLister = watchFactory.EgressFirewallInformer().Lister() efConfig := &controller.ControllerConfig[egressfirewall.EgressFirewall]{ - RateLimiter: workqueue.NewItemFastSlowRateLimiter(time.Second, 5*time.Second, 5), + RateLimiter: workqueue.NewTypedItemFastSlowRateLimiter[string](time.Second, 5*time.Second, 5), Informer: efSharedIndexInformer, Lister: c.efLister.List, ObjNeedsUpdate: efNeedsUpdate, @@ -73,7 +73,7 @@ func (c *Controller) initControllers(watchFactory *factory.WatchFactory) { dnsSharedIndexInformer := watchFactory.DNSNameResolverInformer().Informer() c.dnsLister = ocpnetworklisterv1alpha1.NewDNSNameResolverLister(dnsSharedIndexInformer.GetIndexer()) dnsConfig := &controller.ControllerConfig[ocpnetworkapiv1alpha1.DNSNameResolver]{ - RateLimiter: workqueue.NewItemFastSlowRateLimiter(time.Second, 5*time.Second, 5), + RateLimiter: workqueue.NewTypedItemFastSlowRateLimiter[string](time.Second, 5*time.Second, 5), Informer: dnsSharedIndexInformer, Lister: c.dnsLister.List, ObjNeedsUpdate: dnsNeedsUpdate, diff --git a/go-controller/pkg/clustermanager/dnsnameresolver/controller_suite_test.go b/go-controller/pkg/clustermanager/dnsnameresolver/controller_suite_test.go index b6717689e2..376f8324d9 100644 --- a/go-controller/pkg/clustermanager/dnsnameresolver/controller_suite_test.go +++ b/go-controller/pkg/clustermanager/dnsnameresolver/controller_suite_test.go @@ -3,7 +3,7 @@ package dnsnameresolver import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) diff --git a/go-controller/pkg/clustermanager/dnsnameresolver/controller_test.go b/go-controller/pkg/clustermanager/dnsnameresolver/controller_test.go index c131ad6baa..4ac0c855b5 100644 --- a/go-controller/pkg/clustermanager/dnsnameresolver/controller_test.go +++ b/go-controller/pkg/clustermanager/dnsnameresolver/controller_test.go @@ -4,7 +4,7 @@ import ( "context" "time" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ocpnetworkapiv1alpha1 "github.com/openshift/api/network/v1alpha1" ocpnetworklisterv1alpha1 "github.com/openshift/client-go/network/listers/network/v1alpha1" diff --git a/go-controller/pkg/clustermanager/egressip_controller.go b/go-controller/pkg/clustermanager/egressip_controller.go index 45e255da5b..f22de3b762 100644 --- a/go-controller/pkg/clustermanager/egressip_controller.go +++ b/go-controller/pkg/clustermanager/egressip_controller.go @@ -15,6 +15,7 @@ import ( "time" ocpcloudnetworkapi "github.com/openshift/api/cloudnetwork/v1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/id" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" egressipv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" @@ -116,50 +117,20 @@ func (e *egressNode) getAllocationCountForEgressIP(name string) (count int) { return } -type EgressIPPatchStatus struct { - Op string `json:"op"` - Path string `json:"path"` - Value egressipv1.EgressIPStatus `json:"value"` -} - -// patchReplaceEgressIPStatus performs a replace patch operation of the egress -// IP status by replacing the status with the provided value. This allows us to -// update only the status field, without overwriting any other. This is -// important because processing egress IPs can take a while (when running on a -// public cloud and in the worst case), hence we don't want to perform a full -// object update which risks resetting the EgressIP object's fields to the state -// they had when we started processing the change. -func (eIPC *egressIPClusterController) patchReplaceEgressIPStatus(name string, statusItems []egressipv1.EgressIPStatusItem) error { - klog.Infof("Patching status on EgressIP %s: %v", name, statusItems) - return retry.RetryOnConflict(retry.DefaultRetry, func() error { - t := []EgressIPPatchStatus{ - { - Op: "replace", - Path: "/status", - Value: egressipv1.EgressIPStatus{ - Items: statusItems, - }, - }, - } - op, err := json.Marshal(&t) - if err != nil { - return fmt.Errorf("error serializing status patch operation: %+v, err: %v", statusItems, err) - } - return eIPC.kube.PatchEgressIP(name, op) - }) -} - func (eIPC *egressIPClusterController) getAllocationTotalCount() float64 { count := 0 - eIPC.allocator.Lock() - defer eIPC.allocator.Unlock() - for _, eNode := range eIPC.allocator.cache { + eIPC.nodeAllocator.Lock() + defer eIPC.nodeAllocator.Unlock() + for _, eNode := range eIPC.nodeAllocator.cache { count += len(eNode.allocations) } return float64(count) } -type allocator struct { +// nodeAllocator contains all the information required to manage EgressIP assignment to egress node. This includes assignment +// of EgressIP IPs to nodes and ensuring the egress nodes are reachable. For cloud nodes, it also tracks limits for +// IP assignment to each node. 
+type nodeAllocator struct { *sync.Mutex // A cache used for egress IP assignments containing data for all cluster nodes // used for egress IP assignments @@ -365,9 +336,10 @@ type egressIPClusterController struct { // - On update: once we finish processing the add - which comes after the // delete. pendingCloudPrivateIPConfigsOps map[string]map[string]*cloudPrivateIPConfigOp - // allocator is a cache of egress IP centric data needed to when both route + // nodeAllocator is a cache of egress IP centric data needed to when both route // health-checking and tracking allocations made - allocator allocator + nodeAllocator nodeAllocator + markAllocator id.Allocator // watchFactory watching k8s objects watchFactory *factory.WatchFactory // EgressIP Node reachability total timeout configuration @@ -396,6 +368,8 @@ func newEgressIPController(ovnClient *util.OVNClusterManagerClientset, wf *facto EIPClient: ovnClient.EgressIPClient, CloudNetworkClient: ovnClient.CloudNetworkClient, } + markAllocator := getEgressIPMarkAllocator() + wg := &sync.WaitGroup{} eIPC := &egressIPClusterController{ kube: kube, @@ -403,7 +377,8 @@ func newEgressIPController(ovnClient *util.OVNClusterManagerClientset, wf *facto egressIPAssignmentMutex: &sync.Mutex{}, pendingCloudPrivateIPConfigsMutex: &sync.Mutex{}, pendingCloudPrivateIPConfigsOps: make(map[string]map[string]*cloudPrivateIPConfigOp), - allocator: allocator{&sync.Mutex{}, make(map[string]*egressNode)}, + nodeAllocator: nodeAllocator{&sync.Mutex{}, make(map[string]*egressNode)}, + markAllocator: markAllocator, watchFactory: wf, recorder: recorder, egressIPTotalTimeout: config.OVNKubernetesFeature.EgressIPReachabiltyTotalTimeout, @@ -505,7 +480,7 @@ type egressIPNodeStatus struct { func (eIPC *egressIPClusterController) getSortedEgressData() ([]*egressNode, map[string]egressIPNodeStatus) { assignableNodes := []*egressNode{} allAllocations := make(map[string]egressIPNodeStatus) - for _, eNode := range eIPC.allocator.cache { + for _, eNode := range eIPC.nodeAllocator.cache { if eNode.isEgressAssignable && eNode.isReady && eNode.isReachable { assignableNodes = append(assignableNodes, eNode) } @@ -525,9 +500,9 @@ func (eIPC *egressIPClusterController) initEgressNodeReachability(nodes []interf } func (eIPC *egressIPClusterController) setNodeEgressAssignable(nodeName string, isAssignable bool) { - eIPC.allocator.Lock() - defer eIPC.allocator.Unlock() - if eNode, exists := eIPC.allocator.cache[nodeName]; exists { + eIPC.nodeAllocator.Lock() + defer eIPC.nodeAllocator.Unlock() + if eNode, exists := eIPC.nodeAllocator.cache[nodeName]; exists { eNode.isEgressAssignable = isAssignable // if the node is not assignable/ready/reachable anymore we need to // empty all of it's allocations from our cache since we'll clear all @@ -606,8 +581,8 @@ func (eIPC *egressIPClusterController) checkEgressNodesReachability() { func checkEgressNodesReachabilityIterate(eIPC *egressIPClusterController) { reAddOrDelete := map[string]bool{} - eIPC.allocator.Lock() - for _, eNode := range eIPC.allocator.cache { + eIPC.nodeAllocator.Lock() + for _, eNode := range eIPC.nodeAllocator.cache { if eNode.isEgressAssignable && eNode.isReady { wasReachable := eNode.isReachable isReachable := eIPC.isReachable(eNode.name, eNode.mgmtIPs, eNode.healthClient) @@ -625,7 +600,7 @@ func checkEgressNodesReachabilityIterate(eIPC *egressIPClusterController) { eNode.healthClient.Disconnect() } } - eIPC.allocator.Unlock() + eIPC.nodeAllocator.Unlock() for nodeName, shouldDelete := range reAddOrDelete { if shouldDelete 
{ metrics.RecordEgressIPUnreachableNode() @@ -658,18 +633,18 @@ func (eIPC *egressIPClusterController) isReachable(nodeName string, mgmtIPs []ne } func (eIPC *egressIPClusterController) isEgressNodeReachable(egressNode *v1.Node) bool { - eIPC.allocator.Lock() - defer eIPC.allocator.Unlock() - if eNode, exists := eIPC.allocator.cache[egressNode.Name]; exists { + eIPC.nodeAllocator.Lock() + defer eIPC.nodeAllocator.Unlock() + if eNode, exists := eIPC.nodeAllocator.cache[egressNode.Name]; exists { return eNode.isReachable || eIPC.isReachable(eNode.name, eNode.mgmtIPs, eNode.healthClient) } return false } func (eIPC *egressIPClusterController) setNodeEgressReady(nodeName string, isReady bool) { - eIPC.allocator.Lock() - defer eIPC.allocator.Unlock() - if eNode, exists := eIPC.allocator.cache[nodeName]; exists { + eIPC.nodeAllocator.Lock() + defer eIPC.nodeAllocator.Unlock() + if eNode, exists := eIPC.nodeAllocator.cache[nodeName]; exists { eNode.isReady = isReady // see setNodeEgressAssignable if !isReady { @@ -679,9 +654,9 @@ func (eIPC *egressIPClusterController) setNodeEgressReady(nodeName string, isRea } func (eIPC *egressIPClusterController) setNodeEgressReachable(nodeName string, isReachable bool) { - eIPC.allocator.Lock() - defer eIPC.allocator.Unlock() - if eNode, exists := eIPC.allocator.cache[nodeName]; exists { + eIPC.nodeAllocator.Lock() + defer eIPC.nodeAllocator.Unlock() + if eNode, exists := eIPC.nodeAllocator.cache[nodeName]; exists { eNode.isReachable = isReachable // see setNodeEgressAssignable if !isReachable { @@ -700,7 +675,7 @@ func (eIPC *egressIPClusterController) reconcileSecondaryHostNetworkEIPs(node *v return fmt.Errorf("unable to list EgressIPs, err: %v", err) } reconcileEgressIPs := make([]*egressipv1.EgressIP, 0, len(egressIPs)) - eIPC.allocator.Lock() + eIPC.nodeAllocator.Lock() for _, egressIP := range egressIPs { egressIP := *egressIP for _, status := range egressIP.Status.Items { @@ -709,7 +684,7 @@ func (eIPC *egressIPClusterController) reconcileSecondaryHostNetworkEIPs(node *v if egressIPIP == nil { return fmt.Errorf("unexpected empty egress IP found in status for egressIP %s", egressIP.Name) } - eNode, exists := eIPC.allocator.cache[status.Node] + eNode, exists := eIPC.nodeAllocator.cache[status.Node] if !exists { reconcileEgressIPs = append(reconcileEgressIPs, egressIP.DeepCopy()) continue @@ -730,7 +705,7 @@ func (eIPC *egressIPClusterController) reconcileSecondaryHostNetworkEIPs(node *v } } } - eIPC.allocator.Unlock() + eIPC.nodeAllocator.Unlock() for _, egressIP := range reconcileEgressIPs { if err := eIPC.reconcileEgressIP(nil, egressIP); err != nil { errorAggregate = append(errorAggregate, fmt.Errorf("re-assignment for EgressIP %s hosted by a "+ @@ -779,12 +754,12 @@ func (eIPC *egressIPClusterController) addEgressNode(nodeName string) error { // deleteNodeForEgress remove the default allow logical router policies for the // node and removes the node from the allocator cache. 
func (eIPC *egressIPClusterController) deleteNodeForEgress(node *v1.Node) { - eIPC.allocator.Lock() - if eNode, exists := eIPC.allocator.cache[node.Name]; exists { + eIPC.nodeAllocator.Lock() + if eNode, exists := eIPC.nodeAllocator.cache[node.Name]; exists { eNode.healthClient.Disconnect() } - delete(eIPC.allocator.cache, node.Name) - eIPC.allocator.Unlock() + delete(eIPC.nodeAllocator.cache, node.Name) + eIPC.nodeAllocator.Unlock() } func (eIPC *egressIPClusterController) deleteEgressNode(nodeName string) error { @@ -834,10 +809,10 @@ func (eIPC *egressIPClusterController) initEgressIPAllocator(node *v1.Node) (err for i, subnet := range nodeSubnets { mgmtIPs[i] = util.GetNodeManagementIfAddr(subnet).IP } - eIPC.allocator.Lock() - defer eIPC.allocator.Unlock() - if eNode, exists := eIPC.allocator.cache[node.Name]; !exists { - eIPC.allocator.cache[node.Name] = &egressNode{ + eIPC.nodeAllocator.Lock() + defer eIPC.nodeAllocator.Unlock() + if eNode, exists := eIPC.nodeAllocator.cache[node.Name]; !exists { + eIPC.nodeAllocator.cache[node.Name] = &egressNode{ name: node.Name, egressIPConfig: parsedEgressIPConfig, mgmtIPs: mgmtIPs, @@ -854,10 +829,10 @@ func (eIPC *egressIPClusterController) initEgressIPAllocator(node *v1.Node) (err // deleteAllocatorEgressIPAssignments deletes the allocation as to keep the // cache state correct, also see addAllocatorEgressIPAssignments func (eIPC *egressIPClusterController) deleteAllocatorEgressIPAssignments(statusAssignments []egressipv1.EgressIPStatusItem) { - eIPC.allocator.Lock() - defer eIPC.allocator.Unlock() + eIPC.nodeAllocator.Lock() + defer eIPC.nodeAllocator.Unlock() for _, status := range statusAssignments { - if eNode, exists := eIPC.allocator.cache[status.Node]; exists { + if eNode, exists := eIPC.nodeAllocator.cache[status.Node]; exists { delete(eNode.allocations, status.EgressIP) } } @@ -866,9 +841,9 @@ func (eIPC *egressIPClusterController) deleteAllocatorEgressIPAssignments(status // deleteAllocatorEgressIPAssignmentIfExists deletes egressIP config from node allocations map // if the entry is available and returns assigned node name, otherwise returns empty string. 
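The allocation-tracking helpers in this region all reduce to small mutations of a per-node map from egress IP to the owning EgressIP's name. A rough sketch of the three operations, under the simplifying assumption that the cache is a plain map of maps (the real cache holds full egressNode structs behind the allocator lock):

```go
package main

import "fmt"

// allocations maps node name -> egress IP -> owning EgressIP name.
type allocations map[string]map[string]string

// add mirrors addAllocatorEgressIPAssignments for one status item.
func (a allocations) add(node, egressIP, eipName string) {
	if m, exists := a[node]; exists {
		m[egressIP] = eipName
	}
}

// del mirrors deleteAllocatorEgressIPAssignments for one status item.
func (a allocations) del(node, egressIP string) {
	if m, exists := a[node]; exists {
		delete(m, egressIP)
	}
}

// delIfOwned mirrors deleteAllocatorEgressIPAssignmentIfExists: the entry
// is removed only if it is owned by eipName, and the hosting node name is
// returned; otherwise the empty string is returned.
func (a allocations) delIfOwned(eipName, egressIP string) string {
	for node, m := range a {
		if owner, exists := m[egressIP]; exists && owner == eipName {
			delete(m, egressIP)
			return node
		}
	}
	return ""
}

func main() {
	a := allocations{"node1": {}}
	a.add("node1", "192.168.126.101", "eip1")
	fmt.Println(a.delIfOwned("eip1", "192.168.126.101")) // node1
}
```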
func (eIPC *egressIPClusterController) deleteAllocatorEgressIPAssignmentIfExists(name, egressIP string) string { - eIPC.allocator.Lock() - defer eIPC.allocator.Unlock() - for nodeName, eNode := range eIPC.allocator.cache { + eIPC.nodeAllocator.Lock() + defer eIPC.nodeAllocator.Unlock() + for nodeName, eNode := range eIPC.nodeAllocator.cache { if egressIPName, exists := eNode.allocations[egressIP]; exists && egressIPName == name { delete(eNode.allocations, egressIP) return nodeName @@ -880,10 +855,10 @@ func (eIPC *egressIPClusterController) deleteAllocatorEgressIPAssignmentIfExists // addAllocatorEgressIPAssignments adds the allocation to the cache, so that // they are tracked during the life-cycle of ovnkube-master func (eIPC *egressIPClusterController) addAllocatorEgressIPAssignments(name string, statusAssignments []egressipv1.EgressIPStatusItem) { - eIPC.allocator.Lock() - defer eIPC.allocator.Unlock() + eIPC.nodeAllocator.Lock() + defer eIPC.nodeAllocator.Unlock() for _, status := range statusAssignments { - if eNode, exists := eIPC.allocator.cache[status.Node]; exists { + if eNode, exists := eIPC.nodeAllocator.cache[status.Node]; exists { eNode.allocations[status.EgressIP] = name } } @@ -926,6 +901,8 @@ func (eIPC *egressIPClusterController) reconcileEgressIP(old, new *egressipv1.Eg } } } + } else { + eIPC.deallocMark(name) } // Validate the spec and use only the valid egress IPs when performing any @@ -971,6 +948,12 @@ func (eIPC *egressIPClusterController) reconcileEgressIP(old, new *egressipv1.Eg statusToRemove = append(statusToRemove, status) ipsToRemove.Insert(status.EgressIP) } + // Adding the mark to annotations is bundled with the status update in order to minimise updates and to cover the case where there is no update to status + // but the mark annotation has been modified / removed. This should only occur for an update where the mark was previously set. + if ipsToAssign.Len() == 0 && ipsToRemove.Len() == 0 { + eIPC.ensureMark(old, new) + } + if ipsToRemove.Len() > 0 { // The following is added as to ensure that we only add after having // successfully removed egress IPs. This case is not very important on @@ -1020,7 +1003,7 @@ func (eIPC *egressIPClusterController) reconcileEgressIP(old, new *egressipv1.Eg // Update the object only on an ADD/UPDATE. If we are processing a // DELETE, new will be nil and we should not update the object. if len(statusToAdd) > 0 || (len(statusToRemove) > 0 && new != nil) { - if err := eIPC.patchReplaceEgressIPStatus(name, statusToKeep); err != nil { + if err := eIPC.patchEgressIP(name, eIPC.generateEgressIPPatches(name, new.Annotations, statusToKeep)...); err != nil { return err } } @@ -1048,7 +1031,7 @@ func (eIPC *egressIPClusterController) reconcileEgressIP(old, new *egressipv1.Eg // Update the object only on an ADD/UPDATE. If we are processing a // DELETE, new will be nil and we should not update the object. if new != nil { - if err := eIPC.patchReplaceEgressIPStatus(name, statusToKeep); err != nil { + if err := eIPC.patchEgressIP(name, eIPC.generateEgressIPPatches(name, new.Annotations, statusToKeep)...); err != nil { return err } } @@ -1132,7 +1115,7 @@ func (eIPC *egressIPClusterController) syncCloudPrivateIPConfigs(objs []interfac if cloudPrivateIPNotFound { // There could be one or more stale entry found in egress ip object, remove it by patching egressip // object with updated status.
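Concretely, the patchEgressIP calls introduced in these hunks send an RFC 6902 JSON Patch document. A standalone sketch of what the two-operation payload could serialize to on the wire (the annotation key and all values below are invented for illustration; the real key comes from util.EgressIPMarkAnnotation):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// jsonPatchOperation mirrors the struct added by this PR.
type jsonPatchOperation struct {
	Operation string      `json:"op"`
	Path      string      `json:"path"`
	Value     interface{} `json:"value,omitempty"`
}

func main() {
	// Hypothetical mark and status values, only to show the wire format.
	ops := []jsonPatchOperation{
		{
			Operation: "add",
			Path:      "/metadata/annotations",
			// The annotation key here is an assumption, not the real constant.
			Value: map[string]string{"k8s.ovn.org/egressip-mark": "50001"},
		},
		{
			Operation: "replace",
			Path:      "/status",
			Value: map[string]interface{}{
				"items": []map[string]string{{"node": "node1", "egressIP": "192.168.126.101"}},
			},
		},
	}
	payload, _ := json.Marshal(ops)
	fmt.Println(string(payload))
	// Prints a two-element JSON Patch array: an "add" op on
	// /metadata/annotations followed by a "replace" op on /status.
}
```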
- err = eIPC.patchReplaceEgressIPStatus(egressIP.Name, updatedStatus) + err = eIPC.patchEgressIP(egressIP.Name, eIPC.generateEgressIPPatches(egressIP.Name, egressIP.Annotations, updatedStatus)...) if err != nil { return fmt.Errorf("syncCloudPrivateIPConfigs unable to update EgressIP status: %w", err) } @@ -1172,8 +1155,8 @@ func (eIPC *egressIPClusterController) getCloudPrivateIPConfigMap(objs []interfa // For Egress IPs that are hosted by secondary host networks, there must be at least // one node that hosts the network and exposed via the nodes host-cidrs annotation. func (eIPC *egressIPClusterController) assignEgressIPs(name string, egressIPs []string) []egressipv1.EgressIPStatusItem { - eIPC.allocator.Lock() - defer eIPC.allocator.Unlock() + eIPC.nodeAllocator.Lock() + defer eIPC.nodeAllocator.Unlock() assignments := []egressipv1.EgressIPStatusItem{} assignableNodes, existingAllocations := eIPC.getSortedEgressData() if len(assignableNodes) == 0 { @@ -1223,7 +1206,7 @@ func (eIPC *egressIPClusterController) assignEgressIPs(name string, egressIPs [] egressIP, status.Node, err) continue } - eNode, exists := eIPC.allocator.cache[status.Node] // allocator lock was previously acquired + eNode, exists := eIPC.nodeAllocator.cache[status.Node] // allocator lock was previously acquired if !exists { klog.Errorf("Failed to find entry in allocator cache for EgressIP %s and IP %s,", name, eIP.String()) continue @@ -1420,12 +1403,12 @@ func (eIPC *egressIPClusterController) isEgressIPAddrConflict(egressIP net.IP) ( // any other egress IP handler, so the cache should be warm and correct once we // start going this. func (eIPC *egressIPClusterController) validateEgressIPStatus(name string, items []egressipv1.EgressIPStatusItem) (map[egressipv1.EgressIPStatusItem]string, map[egressipv1.EgressIPStatusItem]string) { - eIPC.allocator.Lock() - defer eIPC.allocator.Unlock() + eIPC.nodeAllocator.Lock() + defer eIPC.nodeAllocator.Unlock() valid, invalid := make(map[egressipv1.EgressIPStatusItem]string), make(map[egressipv1.EgressIPStatusItem]string) for _, eIPStatus := range items { validAssignment := true - eNode, exists := eIPC.allocator.cache[eIPStatus.Node] + eNode, exists := eIPC.nodeAllocator.cache[eIPStatus.Node] if !exists { klog.Errorf("Allocator error: EgressIP: %s claims to have an allocation on a node which is unassignable for egress IP: %s", name, eIPStatus.Node) validAssignment = false @@ -1557,7 +1540,7 @@ func (eIPC *egressIPClusterController) reconcileCloudPrivateIPConfig(old, new *o updatedStatus = append(updatedStatus, status) } } - if err := eIPC.patchReplaceEgressIPStatus(egressIP.Name, updatedStatus); err != nil { + if err := eIPC.patchEgressIP(egressIP.Name, eIPC.generateEgressIPPatches(egressIP.Name, egressIP.Annotations, updatedStatus)...); err != nil { return err } } @@ -1604,7 +1587,7 @@ func (eIPC *egressIPClusterController) reconcileCloudPrivateIPConfig(old, new *o } if !hasStatus { statusToKeep := append(egressIP.Status.Items, statusItem) - if err := eIPC.patchReplaceEgressIPStatus(egressIP.Name, statusToKeep); err != nil { + if err := eIPC.patchEgressIP(egressIP.Name, eIPC.generateEgressIPPatches(egressIP.Name, egressIP.Annotations, statusToKeep)...); err != nil { return err } } @@ -1723,3 +1706,183 @@ func (eIPC *egressIPClusterController) removePendingOpsAndGetResyncs(egressIPNam } return resyncs, nil } + +// jsonPatchOperation contains all the info needed to perform a JSON patch operation on a k8s object +type jsonPatchOperation struct { + Operation string `json:"op"` + Path
string `json:"path"` + Value interface{} `json:"value,omitempty"` +} + +// patchEgressIP performs a patch operation on an EgressIP. +// There are two possible patch operations. +// 1. Mandatory: a replace operation on the egress IP status field. This allows us to +// update only the status field, without overwriting any other. This is +// important because processing egress IPs can take a while (when running on a +// public cloud and in the worst case), hence we don't want to perform a full +// object update which risks resetting the EgressIP object's fields to the state +// they had when we started processing the change. +// 2. Optional: an add operation on its metadata.annotations field. +func (eIPC *egressIPClusterController) patchEgressIP(name string, patches ...jsonPatchOperation) error { + klog.Infof("Patching status on EgressIP %s: %v", name, patches) + op, err := json.Marshal(patches) + if err != nil { + return fmt.Errorf("error serializing patch operation: %+v, err: %v", patches, err) + } + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + return eIPC.kube.PatchEgressIP(name, op) + }) +} + +// generateEgressIPPatches conditionally generates a mark patch if the mark doesn't exist. If it fails to allocate a mark, +// it logs an error instead of failing, because we do not wish to block primary default network egress IP assignments due to potential +// mark range exhaustion. Primary default network egress IP currently does not utilize marks to configure EgressIP. +// Generating the status patch is mandatory. +func (eIPC *egressIPClusterController) generateEgressIPPatches(name string, annotations map[string]string, + statusItems []egressipv1.EgressIPStatusItem) []jsonPatchOperation { + patches := make([]jsonPatchOperation, 0, 1) + if !util.IsEgressIPMarkSet(annotations) { + if mark, _, err := eIPC.getOrAllocMark(name); err != nil { + klog.Errorf("Failed to get mark for EgressIP %s: %v", name, err) + } else { + patches = append(patches, generateMarkPatchOp(mark)) + } + } + return append(patches, generateStatusPatchOp(statusItems)) +} + +func generateMarkPatchOp(mark int) jsonPatchOperation { + return jsonPatchOperation{ + Operation: "add", + Path: "/metadata/annotations", + Value: createAnnotWithMark(mark), + } +} + +func createAnnotWithMark(mark int) map[string]string { + return map[string]string{util.EgressIPMarkAnnotation: fmt.Sprintf("%d", mark)} +} + +func generateStatusPatchOp(statusItems []egressipv1.EgressIPStatusItem) jsonPatchOperation { + return jsonPatchOperation{ + Operation: "replace", + Path: "/status", + Value: egressipv1.EgressIPStatus{ + Items: statusItems, + }, + } +} + +// syncEgressIPMarkAllocator iterates over all existing EgressIPs.
It builds a mark cache of existing marks stored on each +// EgressIP annotation or allocates and adds a new mark to an EgressIP if it doesn't exist +func (eIPC *egressIPClusterController) syncEgressIPMarkAllocator(egressIPs []interface{}) error { + // reserve previously assigned marks + for _, object := range egressIPs { + egressIP, ok := object.(*egressipv1.EgressIP) + if !ok { + return fmt.Errorf("failed to cast %T to *egressipv1.EgressIP", object) + } + if !util.IsEgressIPMarkSet(egressIP.Annotations) { + continue + } + mark, err := util.ParseEgressIPMark(egressIP.Annotations) + if err != nil { + return fmt.Errorf("failed to get mark from EgressIP %s: %v", egressIP.Name, err) + } + if !mark.IsValid() { + return fmt.Errorf("EgressIP %s mark %q is invalid", egressIP.Name, mark.String()) + } + if err = eIPC.reserveMark(egressIP.Name, mark.ToInt()); err != nil { + return fmt.Errorf("failed to reserve mark for EgressIP %s: %v", egressIP.Name, err) + } + } + // assign new marks for EgressIPs without a mark + for _, object := range egressIPs { + egressIP, ok := object.(*egressipv1.EgressIP) + if !ok { + return fmt.Errorf("failed to cast %T to *egressipv1.EgressIP", object) + } + if util.IsEgressIPMarkSet(egressIP.Annotations) { + continue + } + mark, releaseMarkFn, err := eIPC.getOrAllocMark(egressIP.Name) + if err != nil { + // Mark range is limited, so do not return an error in order not to block pods attached to the CDN + klog.Errorf("Failed to sync mark allocator: unable to allocate for EgressIP %s: %v", egressIP.Name, err) + } else { + if err = eIPC.patchEgressIP(egressIP.Name, generateMarkPatchOp(mark)); err != nil { + releaseMarkFn() + return fmt.Errorf("failed to patch EgressIP %s: %v", egressIP.Name, err) + } + } + } + return nil +} + +var ( + eipMarkMax = util.EgressIPMarkMax + eipMarkMin = util.EgressIPMarkBase +) + +func getEgressIPMarkAllocator() id.Allocator { + return id.NewIDAllocator("eip_mark", eipMarkMax-eipMarkMin) +} + +// ensureMark ensures that if the mark annotation was removed or its value changed, the original mark is restored. +func (eIPC *egressIPClusterController) ensureMark(old, new *egressipv1.EgressIP) { + // Adding the mark to annotations is bundled with the status update in order to minimise updates and to cover the case where there is no update to status + // but the mark annotation has been modified / removed. This should only occur for an update where the mark was previously set. + if old != nil && new != nil { + if util.IsEgressIPMarkSet(old.Annotations) && util.EgressIPMarkAnnotationChanged(old.Annotations, new.Annotations) { + mark, _, err := eIPC.getOrAllocMark(new.Name) + if err != nil { + klog.Errorf("Failed to restore EgressIP %s mark because unable to retrieve mark: %v", new.Name, err) + } else if err = eIPC.patchEgressIP(new.Name, generateMarkPatchOp(mark)); err != nil { + klog.Errorf("Failed to restore EgressIP %s mark because patching failed: %v", new.Name, err) + } + } + } +} + +// getOrAllocMark allocates a new mark integer for name using a round-robin strategy if none was already allocated for name; otherwise it +// returns the previously allocated mark. +// The mark is bounded by util.EgressIPMarkBase & util.EgressIPMarkMax inclusive. +// If the range is exhausted, an error is returned. Before calling this func, syncEgressIPMarkAllocator must be called +// to build the initial mark cache. The returned func releases the mark in case of error.
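The mark bookkeeping in getOrAllocMark and reserveMark is an offset mapping between the zero-based IDs handed out by the id.Allocator and the packet-mark range. A small sketch of that arithmetic with made-up bounds (the real ones are util.EgressIPMarkBase and util.EgressIPMarkMax):

```go
package main

import "fmt"

// Hypothetical bounds; stand-ins for util.EgressIPMarkBase / util.EgressIPMarkMax.
const (
	markBase = 50000
	markMax  = 55000
)

// idToMark converts an allocator ID into a packet mark, as getOrAllocMark
// does after AllocateID succeeds, rejecting out-of-range results.
func idToMark(id int) (int, error) {
	mark := id + markBase
	if mark < markBase || mark > markMax {
		return 0, fmt.Errorf("mark %d outside [%d, %d]", mark, markBase, markMax)
	}
	return mark, nil
}

// markToID reverses the mapping, as reserveMark does before calling ReserveID.
func markToID(mark int) (int, error) {
	id := mark - markBase
	if id < 0 {
		return 0, fmt.Errorf("mark %d is below base %d", mark, markBase)
	}
	return id, nil
}

func main() {
	mark, _ := idToMark(1)
	id, _ := markToID(mark)
	fmt.Println(mark, id) // 50001 1
}
```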
+func (eIPC *egressIPClusterController) getOrAllocMark(name string) (int, func(), error) { + if name == "" { + return 0, nil, fmt.Errorf("EgressIP name cannot be blank") + } + mark, err := eIPC.markAllocator.AllocateID(name) + if err != nil { + return 0, nil, fmt.Errorf("failed to allocate mark for EgressIP %q: %v", name, err) + } + mark = mark + util.EgressIPMarkBase + if !util.IsEgressIPMarkValid(mark) { + eIPC.markAllocator.ReleaseID(name) + return 0, nil, fmt.Errorf("for EgressIP %s, mark %d allocated is invalid. Must be between %d and %d", name, mark, util.EgressIPMarkBase, util.EgressIPMarkMax) + } + return mark, func() { + eIPC.markAllocator.ReleaseID(name) + }, nil +} + +// deallocMark de-allocates a mark +func (eIPC *egressIPClusterController) deallocMark(name string) { + eIPC.markAllocator.ReleaseID(name) +} + +// reserveMark reserves a previously assigned mark to the mark cache +func (eIPC *egressIPClusterController) reserveMark(name string, mark int) error { + if name == "" { + return fmt.Errorf("name cannot be blank") + } + mark = mark - util.EgressIPMarkBase + if mark < 0 { + return fmt.Errorf("unable to reserve mark because calculated offset is less than zero") + } + if err := eIPC.markAllocator.ReserveID(name, mark); err != nil { + return fmt.Errorf("failed to reserve mark: %v", err) + } + return nil +} diff --git a/go-controller/pkg/clustermanager/egressip_controller_test.go b/go-controller/pkg/clustermanager/egressip_controller_test.go index 1e03813921..0573f7e17d 100644 --- a/go-controller/pkg/clustermanager/egressip_controller_test.go +++ b/go-controller/pkg/clustermanager/egressip_controller_test.go @@ -4,11 +4,10 @@ import ( "context" "fmt" "net" + "strconv" "time" - "github.com/onsi/ginkgo/extensions/table" - - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ocpcloudnetworkapi "github.com/openshift/api/cloudnetwork/v1" ocpconfigapi "github.com/openshift/api/config/v1" @@ -21,6 +20,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/util/retry" utilnet "k8s.io/utils/net" ) @@ -165,15 +165,15 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { hccAllocator = &fakeEgressIPHealthClientAllocator{} getEgressIPAllocatorSizeSafely := func() int { - fakeClusterManagerOVN.eIPC.allocator.Lock() - defer fakeClusterManagerOVN.eIPC.allocator.Unlock() - return len(fakeClusterManagerOVN.eIPC.allocator.cache) + fakeClusterManagerOVN.eIPC.nodeAllocator.Lock() + defer fakeClusterManagerOVN.eIPC.nodeAllocator.Unlock() + return len(fakeClusterManagerOVN.eIPC.nodeAllocator.cache) } getEgressIPAllocatorSafely := func(s string, v6 bool) util.ParsedIFAddr { - fakeClusterManagerOVN.eIPC.allocator.Lock() - defer fakeClusterManagerOVN.eIPC.allocator.Unlock() - c, ok := fakeClusterManagerOVN.eIPC.allocator.cache[s] + fakeClusterManagerOVN.eIPC.nodeAllocator.Lock() + defer fakeClusterManagerOVN.eIPC.nodeAllocator.Unlock() + c, ok := fakeClusterManagerOVN.eIPC.nodeAllocator.cache[s] if !ok { panic(fmt.Sprintf("failed to find key %s", s)) } @@ -185,9 +185,9 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { } getEgressIPAllocatorReachableSafely := func(s string) bool { - fakeClusterManagerOVN.eIPC.allocator.Lock() - defer fakeClusterManagerOVN.eIPC.allocator.Unlock() - c, ok := fakeClusterManagerOVN.eIPC.allocator.cache[s] + fakeClusterManagerOVN.eIPC.nodeAllocator.Lock() + defer 
fakeClusterManagerOVN.eIPC.nodeAllocator.Unlock() + c, ok := fakeClusterManagerOVN.eIPC.nodeAllocator.cache[s] if !ok { panic(fmt.Sprintf("failed to find key %s", s)) } @@ -195,16 +195,16 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { } doesEgressIPAllocatorContainSafely := func(s string) bool { - fakeClusterManagerOVN.eIPC.allocator.Lock() - defer fakeClusterManagerOVN.eIPC.allocator.Unlock() - _, ok := fakeClusterManagerOVN.eIPC.allocator.cache[s] + fakeClusterManagerOVN.eIPC.nodeAllocator.Lock() + defer fakeClusterManagerOVN.eIPC.nodeAllocator.Unlock() + _, ok := fakeClusterManagerOVN.eIPC.nodeAllocator.cache[s] return ok } getEgressIPAllocatorHealthCheckSafely := func(s string) *fakeEgressIPHealthClient { - fakeClusterManagerOVN.eIPC.allocator.Lock() - defer fakeClusterManagerOVN.eIPC.allocator.Unlock() - c, ok := fakeClusterManagerOVN.eIPC.allocator.cache[s] + fakeClusterManagerOVN.eIPC.nodeAllocator.Lock() + defer fakeClusterManagerOVN.eIPC.nodeAllocator.Unlock() + c, ok := fakeClusterManagerOVN.eIPC.nodeAllocator.cache[s] if !ok { panic(fmt.Sprintf("failed to find node %s in allocator", s)) } @@ -231,6 +231,16 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { return egressIPs, nodes } + getEgressIPAnnotationValue := func(egressIPName string) func() (string, error) { + return func() (string, error) { + tmp, err := fakeClusterManagerOVN.fakeClient.EgressIPClient.K8sV1().EgressIPs().Get(context.TODO(), egressIPName, metav1.GetOptions{}) + if err != nil { + return "", err + } + return tmp.Annotations[util.EgressIPMarkAnnotation], nil + } + } + getEgressIPReassignmentCount := func() int { reAssignmentCount := 0 egressIPs, err := fakeClusterManagerOVN.fakeClient.EgressIPClient.K8sV1().EgressIPs().List(context.TODO(), metav1.ListOptions{}) @@ -245,9 +255,9 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { isEgressAssignableNode := func(nodeName string) func() bool { return func() bool { - fakeClusterManagerOVN.eIPC.allocator.Lock() - defer fakeClusterManagerOVN.eIPC.allocator.Unlock() - if item, exists := fakeClusterManagerOVN.eIPC.allocator.cache[nodeName]; exists { + fakeClusterManagerOVN.eIPC.nodeAllocator.Lock() + defer fakeClusterManagerOVN.eIPC.nodeAllocator.Unlock() + if item, exists := fakeClusterManagerOVN.eIPC.nodeAllocator.cache[nodeName]; exists { return item.isEgressAssignable } return false @@ -279,7 +289,7 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { }) ginkgo.Context("On node ADD/UPDATE/DELETE", func() { - table.DescribeTable("should re-assign EgressIPs and perform proper egressIP allocation changes", func(egressIP, expectedNetwork string) { + ginkgo.DescribeTable("should re-assign EgressIPs and perform proper egressIP allocation changes", func(egressIP, expectedNetwork string) { app.Action = func(ctx *cli.Context) error { node1IPv4OVN := "192.168.126.202/24" node1IPv4SecondaryHost := "10.10.10.3/24" @@ -362,8 +372,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(getEgressIPAllocatorSizeSafely).Should(gomega.Equal(2)) - gomega.Expect(fakeClusterManagerOVN.eIPC.allocator.cache).To(gomega.HaveKey(node1.Name)) - gomega.Expect(fakeClusterManagerOVN.eIPC.allocator.cache).To(gomega.HaveKey(node2.Name)) + gomega.Expect(fakeClusterManagerOVN.eIPC.nodeAllocator.cache).To(gomega.HaveKey(node1.Name)) + 
gomega.Expect(fakeClusterManagerOVN.eIPC.nodeAllocator.cache).To(gomega.HaveKey(node2.Name)) gomega.Eventually(isEgressAssignableNode(node1.Name)).Should(gomega.BeTrue()) gomega.Eventually(isEgressAssignableNode(node2.Name)).Should(gomega.BeFalse()) @@ -393,13 +403,13 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { app.Name, }) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }, table.Entry("OVN network", "192.168.126.101", "192.168.126.0/24"), - table.Entry("Secondary host network", "10.10.10.100", "10.10.10.0/24"), - table.Entry("Secondary host network", "7.7.7.100", "7.7.0.0/16"), - table.Entry("Secondary host network", "7.7.8.100", "7.7.0.0/16"), + }, ginkgo.Entry("OVN network", "192.168.126.101", "192.168.126.0/24"), + ginkgo.Entry("Secondary host network", "10.10.10.100", "10.10.10.0/24"), + ginkgo.Entry("Secondary host network", "7.7.7.100", "7.7.0.0/16"), + ginkgo.Entry("Secondary host network", "7.7.8.100", "7.7.0.0/16"), ) - table.DescribeTable("should re-assign EgressIPs and perform proper egressIP allocation changes during node deletion", func(egressIP, expectedNetwork string) { + ginkgo.DescribeTable("should re-assign EgressIPs and perform proper egressIP allocation changes during node deletion", func(egressIP, expectedNetwork string) { app.Action = func(ctx *cli.Context) error { node1IPv4OVN := "192.168.126.202/24" node1IPv4SecondaryHost := "10.10.10.3/24" @@ -483,8 +493,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(getEgressIPAllocatorSizeSafely).Should(gomega.Equal(2)) - gomega.Expect(fakeClusterManagerOVN.eIPC.allocator.cache).To(gomega.HaveKey(node1.Name)) - gomega.Expect(fakeClusterManagerOVN.eIPC.allocator.cache).To(gomega.HaveKey(node2.Name)) + gomega.Expect(fakeClusterManagerOVN.eIPC.nodeAllocator.cache).To(gomega.HaveKey(node1.Name)) + gomega.Expect(fakeClusterManagerOVN.eIPC.nodeAllocator.cache).To(gomega.HaveKey(node2.Name)) gomega.Eventually(isEgressAssignableNode(node1.Name)).Should(gomega.BeTrue()) gomega.Eventually(isEgressAssignableNode(node2.Name)).Should(gomega.BeFalse()) @@ -511,10 +521,10 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }, table.Entry("OVN network", "192.168.126.101", "192.168.126.0/24"), - table.Entry("Secondary host network", "10.10.10.100", "10.10.10.0/24"), - table.Entry("Secondary host network", "7.7.7.100", "7.7.0.0/16"), - table.Entry("Secondary host network", "7.7.8.100", "7.7.0.0/16"), + }, ginkgo.Entry("OVN network", "192.168.126.101", "192.168.126.0/24"), + ginkgo.Entry("Secondary host network", "10.10.10.100", "10.10.10.0/24"), + ginkgo.Entry("Secondary host network", "7.7.7.100", "7.7.0.0/16"), + ginkgo.Entry("Secondary host network", "7.7.8.100", "7.7.0.0/16"), ) ginkgo.It("should assign EgressIPs to a linux node when there are windows nodes in the cluster", func() { app.Action = func(ctx *cli.Context) error { @@ -603,7 +613,7 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(getEgressIPAllocatorSizeSafely).Should(gomega.Equal(1)) - gomega.Expect(fakeClusterManagerOVN.eIPC.allocator.cache).To(gomega.HaveKey(node1.Name)) + gomega.Expect(fakeClusterManagerOVN.eIPC.nodeAllocator.cache).To(gomega.HaveKey(node1.Name)) gomega.Eventually(isEgressAssignableNode(node1.Name)).Should(gomega.BeTrue()) 
gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(1)) @@ -937,7 +947,7 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { ) allocatorItems := func() int { - return len(fakeClusterManagerOVN.eIPC.allocator.cache) + return len(fakeClusterManagerOVN.eIPC.nodeAllocator.cache) } _, err := fakeClusterManagerOVN.eIPC.WatchEgressNodes() @@ -1242,8 +1252,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(getEgressIPAllocatorSizeSafely).Should(gomega.Equal(2)) - gomega.Expect(fakeClusterManagerOVN.eIPC.allocator.cache).To(gomega.HaveKey(node1.Name)) - gomega.Expect(fakeClusterManagerOVN.eIPC.allocator.cache).To(gomega.HaveKey(node2.Name)) + gomega.Expect(fakeClusterManagerOVN.eIPC.nodeAllocator.cache).To(gomega.HaveKey(node1.Name)) + gomega.Expect(fakeClusterManagerOVN.eIPC.nodeAllocator.cache).To(gomega.HaveKey(node2.Name)) gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(0)) gomega.Eventually(fakeClusterManagerOVN.fakeRecorder.Events).Should(gomega.HaveLen(3)) @@ -1341,7 +1351,7 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(1)) gomega.Eventually(getEgressIPReassignmentCount).Should(gomega.Equal(0)) - gomega.Expect(fakeClusterManagerOVN.eIPC.allocator.cache).To(gomega.HaveKey(node2.Name)) + gomega.Expect(fakeClusterManagerOVN.eIPC.nodeAllocator.cache).To(gomega.HaveKey(node2.Name)) return nil } @@ -1423,8 +1433,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { _, ip2V4Sub, err := net.ParseCIDR(node2IPv4) gomega.Eventually(getEgressIPAllocatorSizeSafely).Should(gomega.Equal(2)) - gomega.Expect(fakeClusterManagerOVN.eIPC.allocator.cache).To(gomega.HaveKey(node1.Name)) - gomega.Expect(fakeClusterManagerOVN.eIPC.allocator.cache).To(gomega.HaveKey(node2.Name)) + gomega.Expect(fakeClusterManagerOVN.eIPC.nodeAllocator.cache).To(gomega.HaveKey(node1.Name)) + gomega.Expect(fakeClusterManagerOVN.eIPC.nodeAllocator.cache).To(gomega.HaveKey(node2.Name)) gomega.Eventually(isEgressAssignableNode(node1.Name)).Should(gomega.BeFalse()) gomega.Eventually(isEgressAssignableNode(node2.Name)).Should(gomega.BeFalse()) gomega.Expect(getEgressIPAllocatorSafely(node1.Name, false).Net).To(gomega.Equal(ip1V4Sub)) @@ -1539,7 +1549,7 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(getEgressIPAllocatorSizeSafely).Should(gomega.Equal(1)) - gomega.Expect(fakeClusterManagerOVN.eIPC.allocator.cache).To(gomega.HaveKey(node1.Name)) + gomega.Expect(fakeClusterManagerOVN.eIPC.nodeAllocator.cache).To(gomega.HaveKey(node1.Name)) gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(1)) egressIPs, nodes := getEgressIPStatus(egressIPName) gomega.Expect(nodes[0]).To(gomega.Equal(node1.Name)) @@ -1552,14 +1562,14 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { gomega.Expect(nodes[0]).To(gomega.Equal(node1.Name)) gomega.Expect(egressIPs[0]).To(gomega.Equal(egressIP)) gomega.Eventually(getEgressIPAllocatorSizeSafely).Should(gomega.Equal(2)) - gomega.Expect(fakeClusterManagerOVN.eIPC.allocator.cache).To(gomega.HaveKey(node1.Name)) - gomega.Expect(fakeClusterManagerOVN.eIPC.allocator.cache).To(gomega.HaveKey(node2.Name)) + 
gomega.Expect(fakeClusterManagerOVN.eIPC.nodeAllocator.cache).To(gomega.HaveKey(node1.Name)) + gomega.Expect(fakeClusterManagerOVN.eIPC.nodeAllocator.cache).To(gomega.HaveKey(node2.Name)) err = fakeClusterManagerOVN.fakeClient.KubeClient.CoreV1().Nodes().Delete(context.TODO(), node1.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(getEgressIPAllocatorSizeSafely).Should(gomega.Equal(1)) - gomega.Expect(fakeClusterManagerOVN.eIPC.allocator.cache).ToNot(gomega.HaveKey(node1.Name)) - gomega.Expect(fakeClusterManagerOVN.eIPC.allocator.cache).To(gomega.HaveKey(node2.Name)) + gomega.Expect(fakeClusterManagerOVN.eIPC.nodeAllocator.cache).ToNot(gomega.HaveKey(node1.Name)) + gomega.Expect(fakeClusterManagerOVN.eIPC.nodeAllocator.cache).To(gomega.HaveKey(node2.Name)) gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(1)) getNewNode := func() string { @@ -1638,7 +1648,7 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { gomega.Eventually(getEgressIPStatusLen(eIP1.Name)).Should(gomega.Equal(1)) egressIPs, _ := getEgressIPStatus(eIP1.Name) gomega.Expect(egressIPs[0]).To(gomega.Equal(egressIP)) - hcClient := fakeClusterManagerOVN.eIPC.allocator.cache[node.Name].healthClient.(*fakeEgressIPHealthClient) + hcClient := fakeClusterManagerOVN.eIPC.nodeAllocator.cache[node.Name].healthClient.(*fakeEgressIPHealthClient) hcClient.FakeProbeFailure = true // explicitly call check reachability, periodic checker is not active checkEgressNodesReachabilityIterate(fakeClusterManagerOVN.eIPC) @@ -1650,7 +1660,7 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(hcClient.IsConnected()).Should(gomega.Equal(true)) // the node should not be marked as reachable in the update handler as it is not getting added - gomega.Consistently(func() bool { return fakeClusterManagerOVN.eIPC.allocator.cache[node.Name].isReachable }).Should(gomega.Equal(false)) + gomega.Consistently(func() bool { return fakeClusterManagerOVN.eIPC.nodeAllocator.cache[node.Name].isReachable }).Should(gomega.Equal(false)) // egress IP should get assigned on the next checkEgressNodesReachabilityIterate call // explicitly call check reachability, periodic checker is not active @@ -1727,8 +1737,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { egressNode1 := setupNode(node1Name, []string{"0:0:0:0:0:feff:c0a8:8e0c/64"}, map[string]string{"0:0:0:0:0:feff:c0a8:8e32": "bogus1", "0:0:0:0:0:feff:c0a8:8e1e": "bogus2"}) egressNode2 := setupNode(node2Name, []string{"0:0:0:0:0:fedf:c0a8:8e0c/64"}, map[string]string{"0:0:0:0:0:feff:c0a8:8e23": "bogus3"}) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 eIP := egressipv1.EgressIP{ ObjectMeta: newEgressIPMeta(egressIPName), @@ -1748,7 +1758,7 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) - table.DescribeTable("should be able to allocate several EgressIPs and avoid the same node", func(egressIP1, egressIP2 string) { + ginkgo.DescribeTable("should be able to allocate several EgressIPs and avoid the same node", func(egressIP1, egressIP2 string) { app.Action 
= func(ctx *cli.Context) error { node1IPv4OVN := "" node1IPv6OVN := "0:0:0:0:0:feff:c0a8:8e0c/64" @@ -1815,8 +1825,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { egressNode1 := setupNode(node1Name, []string{node1IPv6OVN}, map[string]string{"0:0:0:0:0:feff:c0a8:8e32": "bogus1", "0:0:0:0:0:feff:c0a8:8e1e": "bogus2"}) egressNode2 := setupNode(node2Name, []string{node2IPv6OVN}, map[string]string{"0:0:0:0:0:feff:c0a8:8e23": "bogus3"}) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 assignedStatuses := fakeClusterManagerOVN.eIPC.assignEgressIPs(eIP.Name, eIP.Spec.EgressIPs) gomega.Expect(assignedStatuses).To(gomega.HaveLen(2)) @@ -1829,11 +1839,11 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }, table.Entry("OVN egress IPs", "0:0:0:0:0:feff:c0a8:8e0d", "0:0:0:0:0:feff:c0a8:8e0f"), - table.Entry("Secondary host egress IPs", "0:0:1:0:0:fecf:c0a8:8e0d", "0:0:1:0:0:febf:c0a8:8e0f"), - table.Entry("OVN and secondary host egress IPs", "0:0:1:0:0:fecf:c0a8:8e0d", "0:0:0:0:0:feff:c0a8:8e0f")) + }, ginkgo.Entry("OVN egress IPs", "0:0:0:0:0:feff:c0a8:8e0d", "0:0:0:0:0:feff:c0a8:8e0f"), + ginkgo.Entry("Secondary host egress IPs", "0:0:1:0:0:fecf:c0a8:8e0d", "0:0:1:0:0:febf:c0a8:8e0f"), + ginkgo.Entry("OVN and secondary host egress IPs", "0:0:1:0:0:fecf:c0a8:8e0d", "0:0:0:0:0:feff:c0a8:8e0f")) - table.DescribeTable("should be able to allocate several EgressIPs and avoid the same node and leave one un-assigned without error", func(egressIP1, egressIP2, egressIP3 string) { + ginkgo.DescribeTable("should be able to allocate several EgressIPs and avoid the same node and leave one un-assigned without error", func(egressIP1, egressIP2, egressIP3 string) { app.Action = func(ctx *cli.Context) error { node1IPv4OVN := "" node1IPv6OVN := "0:0:0:0:0:feff:c0a8:8e0c/64" @@ -1900,8 +1910,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { egressNode1 := setupNode(node1Name, []string{node1IPv4OVN}, map[string]string{"0:0:0:0:0:feff:c0a8:8e32": "bogus1", "0:0:0:0:0:feff:c0a8:8e1e": "bogus2"}) egressNode2 := setupNode(node2Name, []string{node2IPv4OVN}, map[string]string{"0:0:0:0:0:feff:c0a8:8e23": "bogus3"}) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 gomega.Expect(fakeClusterManagerOVN.eIPC.initEgressIPAllocator(&node1)).To(gomega.Succeed()) gomega.Expect(fakeClusterManagerOVN.eIPC.initEgressIPAllocator(&node2)).To(gomega.Succeed()) @@ -1917,8 +1927,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }, table.Entry("OVN network", "0:0:0:0:0:feff:c0a8:8e0d", "0:0:0:0:0:feff:c0a8:8e0e", "0:0:0:0:0:feff:c0a8:8e0f"), - table.Entry("Secondary host network", "0:0:1:0:0:feff:c0a8:8e0d", "0:0:1:0:0:feff:c0a8:8e0e", "0:0:1:0:0:feff:c0a8:8e0f"), + }, ginkgo.Entry("OVN network", "0:0:0:0:0:feff:c0a8:8e0d", 
"0:0:0:0:0:feff:c0a8:8e0e", "0:0:0:0:0:feff:c0a8:8e0f"), + ginkgo.Entry("Secondary host network", "0:0:1:0:0:feff:c0a8:8e0d", "0:0:1:0:0:feff:c0a8:8e0e", "0:0:1:0:0:feff:c0a8:8e0f"), ) ginkgo.It("should be able to allocate several EgressIPs for OVN and Secondary host networks and avoid the same node and leave multiple EgressIPs un-assigned without error", func() { @@ -1994,8 +2004,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { egressNode1 := setupNode(node1Name, []string{node1IPv4OVN}, map[string]string{"0:0:0:0:0:feff:c0a8:8e32": "bogus1", "0:0:0:0:0:feff:c0a8:8e1e": "bogus2"}) egressNode2 := setupNode(node2Name, []string{node2IPv4OVN}, map[string]string{"0:0:0:0:0:feff:c0a8:8e23": "bogus3"}) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 gomega.Expect(fakeClusterManagerOVN.eIPC.initEgressIPAllocator(&node1)).To(gomega.Succeed()) gomega.Expect(fakeClusterManagerOVN.eIPC.initEgressIPAllocator(&node2)).To(gomega.Succeed()) @@ -2013,7 +2023,7 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) - table.DescribeTable("should return the already allocated IP with the same node if it is allocated again", func(egressIP string) { + ginkgo.DescribeTable("should return the already allocated IP with the same node if it is allocated again", func(egressIP string) { app.Action = func(ctx *cli.Context) error { node1IPv4OVN := "" node1IPv6OVN := "0:0:0:0:0:feff:c0a8:8e0c/64" @@ -2080,8 +2090,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { egressNode1 := setupNode(node1Name, []string{node1IPv4OVN}, map[string]string{"0:0:0:0:0:feff:c0a8:8e32": "bogus1", "0:0:0:0:0:feff:c0a8:8e1e": "bogus2"}) egressNode2 := setupNode(node2Name, []string{node2IPv4OVN}, map[string]string{"0:0:0:0:0:feff:c0a8:8e23": "bogus3"}) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 gomega.Expect(fakeClusterManagerOVN.eIPC.initEgressIPAllocator(&node1)).To(gomega.Succeed()) gomega.Expect(fakeClusterManagerOVN.eIPC.initEgressIPAllocator(&node2)).To(gomega.Succeed()) @@ -2096,8 +2106,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }, table.Entry("OVN network", "0:0:0:0:0:feff:c0a8:8e1a"), - table.Entry("Secondary host network", "0:0:1:0:0:feff:c0a8:8e0d"), + }, ginkgo.Entry("OVN network", "0:0:0:0:0:feff:c0a8:8e1a"), + ginkgo.Entry("Secondary host network", "0:0:1:0:0:feff:c0a8:8e0d"), ) ginkgo.It("should not be able to allocate node IP", func() { @@ -2167,8 +2177,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { egressNode1 := setupNode(node1Name, []string{egressIP + "/64"}, map[string]string{"0:0:0:0:0:feff:c0a8:8e32": "bogus1", "0:0:0:0:0:feff:c0a8:8e1e": "bogus2"}) egressNode2 := setupNode(node2Name, []string{"0:0:0:0:0:fedf:c0a8:8e0c/64"}, map[string]string{"0:0:0:0:0:feff:c0a8:8e23": "bogus3"}) - 
fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 assignedStatuses := fakeClusterManagerOVN.eIPC.assignEgressIPs(eIP.Name, eIP.Spec.EgressIPs) gomega.Expect(assignedStatuses).To(gomega.HaveLen(0)) @@ -2248,8 +2258,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { egressNode1 := setupNode(node1Name, []string{node1IPv6, node1IPv62}, map[string]string{"0:0:0:0:0:feff:c0a8:8e32": "bogus1", "0:0:0:0:0:feff:c0a8:8e1e": "bogus2"}) egressNode2 := setupNode(node2Name, []string{node2IPv6}, map[string]string{"0:0:0:0:0:feff:c0a8:8e23": "bogus3"}) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 gomega.Expect(fakeClusterManagerOVN.eIPC.initEgressIPAllocator(&node1)).To(gomega.Succeed()) gomega.Expect(fakeClusterManagerOVN.eIPC.initEgressIPAllocator(&node2)).To(gomega.Succeed()) @@ -2329,8 +2339,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { egressNode1 := setupNode(node1Name, []string{"0:0:0:0:0:feff:c0a8:8e0c/64"}, map[string]string{"0:0:0:0:0:feff:c0a8:8e32": "bogus1", "0:0:0:0:0:feff:c0a8:8e1e": "bogus2"}) egressNode2 := setupNode(node2Name, []string{"0:0:0:0:0:fedf:c0a8:8e0c/64"}, map[string]string{"0:0:0:0:0:feff:c0a8:8e23": "bogus3"}) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 assignedStatuses := fakeClusterManagerOVN.eIPC.assignEgressIPs(eIP.Name, eIP.Spec.EgressIPs) gomega.Expect(assignedStatuses).To(gomega.HaveLen(0)) @@ -2408,8 +2418,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { egressNode1 := setupNode(node1Name, []string{"0:0:0:0:0:feff:c0a8:8e0c/64"}, map[string]string{"0:0:0:0:0:feff:c0a8:8e32": "bogus1", "0:0:0:0:0:feff:c0a8:8e1e": "bogus2"}) egressNode2 := setupNode(node2Name, []string{"0:0:0:0:0:fedf:c0a8:8e0c/64"}, map[string]string{"0:0:0:0:0:feff:c0a8:8e23": "bogus3"}) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 assignedStatuses := fakeClusterManagerOVN.eIPC.assignEgressIPs(eIP.Name, eIP.Spec.EgressIPs) gomega.Expect(assignedStatuses).To(gomega.HaveLen(0)) @@ -2487,8 +2497,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { egressNode1 := setupNode(node1Name, []string{"0:0:0:0:0:feff:c0a8:8e0c/64"}, map[string]string{"0:0:0:0:0:feff:c0a8:8e32": "bogus1", "0:0:0:0:0:feff:c0a8:8e1e": "bogus2"}) egressNode2 := setupNode(node2Name, []string{"0:0:0:0:0:fedf:c0a8:8e0c/64"}, map[string]string{"0:0:0:0:0:feff:c0a8:8e23": "bogus3"}) - 
fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 assignedStatuses := fakeClusterManagerOVN.eIPC.assignEgressIPs(eIP.Name, eIP.Spec.EgressIPs) gomega.Expect(assignedStatuses).To(gomega.HaveLen(1)) @@ -2568,8 +2578,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { egressNode1 := setupNode(node1Name, []string{"0:0:0:0:0:feff:c0a8:8e0c/64"}, map[string]string{"0:0:0:0:0:feff:c0a8:8e32": "bogus1", "0:0:0:0:0:feff:c0a8:8e1e": "bogus2"}) egressNode2 := setupNode(node2Name, []string{"0:0:0:0:0:fedf:c0a8:8e0c/64"}, map[string]string{"0:0:0:0:0:feff:c0a8:8e23": "bogus3"}) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 assignedStatuses := fakeClusterManagerOVN.eIPC.assignEgressIPs(eIP.Name, eIP.Spec.EgressIPs) gomega.Expect(assignedStatuses).To(gomega.HaveLen(0)) @@ -2724,8 +2734,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { egressNode1 := setupNode(node1Name, []string{"0:0:0:0:0:feff:c0a8:8e0c/64"}, map[string]string{"0:0:0:0:0:feff:c0a8:8e23": "bogus1"}) egressNode2 := setupNode(node2Name, []string{"192.168.126.51/24"}, map[string]string{"192.168.126.68": "bogus1", "192.168.126.102": "bogus2"}) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 assignedStatuses := fakeClusterManagerOVN.eIPC.assignEgressIPs(eIP.Name, eIP.Spec.EgressIPs) gomega.Expect(assignedStatuses).To(gomega.HaveLen(1)) @@ -2807,8 +2817,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { egressNode1 := setupNode(node1Name, []string{node1IPv4}, map[string]string{"192.168.126.102": "bogus1", "192.168.126.111": "bogus2"}) egressNode2 := setupNode(node2Name, []string{node2IPv4}, map[string]string{"192.168.126.68": "bogus3"}) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 validatedIPs, err := fakeClusterManagerOVN.eIPC.validateEgressIPSpec(eIP.Name, eIP.Spec.EgressIPs) gomega.Expect(err).To(gomega.HaveOccurred()) @@ -2894,8 +2904,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { egressNode1 := setupNode(node1Name, []string{node1IPv4}, map[string]string{"192.168.126.102": "bogus1", "192.168.126.111": "bogus2"}) egressNode2 := setupNode(node2Name, []string{node2IPv4}, map[string]string{"192.168.126.68": "bogus3"}) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + 
fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 _, err := fakeClusterManagerOVN.eIPC.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2978,8 +2988,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { egressNode1 := setupNode(node1Name, []string{"0:0:0:0:0:feff:c0a8:8e0c/64"}, map[string]string{"0:0:0:0:0:feff:c0a8:8e32": "bogus1", "0:0:0:0:0:feff:c0a8:8e1e": "bogus2"}) egressNode2 := setupNode(node2Name, []string{"0:0:0:0:0:fedf:c0a8:8e0c/64"}, map[string]string{"0:0:0:0:0:feff:c0a8:8e23": "bogus3"}) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 _, err := fakeClusterManagerOVN.eIPC.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3059,8 +3069,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { egressNode1 := setupNode(node1Name, []string{node1IPv6}, map[string]string{"0:0:0:0:0:feff:c0a8:8e23": "bogus1"}) egressNode2 := setupNode(node2Name, []string{node2IPv4}, map[string]string{"192.168.126.68": "bogus2", "192.168.126.102": "bogus3"}) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 _, err := fakeClusterManagerOVN.eIPC.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3074,6 +3084,123 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) + + ginkgo.It("should manage mark cache for add/update/delete", func() { + app.Action = func(ctx *cli.Context) error { + egressIP := "192.168.126.10" + node1IPv4 := "192.168.126.12/24" + node2IPv4 := "192.168.126.51/24" + + node1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: node1Name, + Annotations: map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", node1IPv4, ""), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":[\"%s\", \"%s\"]}", v4NodeSubnet, v6NodeSubnet), + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4), + }, + Labels: map[string]string{ + "k8s.ovn.org/egress-assignable": "", + }, + }, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeReady, + Status: v1.ConditionTrue, + }, + }, + }, + } + node2 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: node2Name, + Annotations: map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", node2IPv4, ""), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\": [\"%s\",\"%s\"]}", v4NodeSubnet, v6NodeSubnet), + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4), + }, + Labels: map[string]string{ + "k8s.ovn.org/egress-assignable": "", + }, + }, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeReady, + Status: v1.ConditionTrue, + }, + }, + }, + } + + eIP := &egressipv1.EgressIP{ + ObjectMeta: newEgressIPMeta(egressIPName), + Spec: egressipv1.EgressIPSpec{ + EgressIPs: []string{egressIP}, + NamespaceSelector: metav1.LabelSelector{ + 
MatchLabels: map[string]string{ + "name": "does-not-exist", + }, + }, + }, + } + + fakeClusterManagerOVN.start( + &v1.NodeList{Items: []v1.Node{node1, node2}}, + ) + + egressNode1 := setupNode(node1Name, []string{node1IPv4}, map[string]string{"192.168.126.102": "bogus1", "192.168.126.111": "bogus2"}) + egressNode2 := setupNode(node2Name, []string{node2IPv4}, map[string]string{"192.168.126.68": "bogus3"}) + + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 + + _, err := fakeClusterManagerOVN.eIPC.WatchEgressIP() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + eIP, err = fakeClusterManagerOVN.fakeClient.EgressIPClient.K8sV1().EgressIPs().Create(context.TODO(), eIP, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create EgressIP") + + gomega.Eventually(getEgressIPAnnotationValue(eIP.Name)).ShouldNot(gomega.BeEmpty()) + assignedMarkStr, err := getEgressIPAnnotationValue(eIP.Name)() + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "failed to get egress IP mark from annotations") + assignedMark, err := strconv.Atoi(assignedMarkStr) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "failed to convert mark to integer") + + ginkgo.By("clear mark to cause update and expect restoration of mark") + gomega.Expect(retry.RetryOnConflict(retry.DefaultRetry, func() error { + eIP, err := fakeClusterManagerOVN.fakeClient.EgressIPClient.K8sV1().EgressIPs().Get(context.TODO(), eIP.Name, metav1.GetOptions{}) + if err != nil { + return err + } + eIP.Annotations = map[string]string{} + eIP, err = fakeClusterManagerOVN.fakeClient.EgressIPClient.K8sV1().EgressIPs().Update(context.TODO(), eIP, metav1.UpdateOptions{}) + return err + })).ShouldNot(gomega.HaveOccurred(), "failed to update EgressIP object") + ginkgo.By("confirm the original mark is restored") + gomega.Eventually(getEgressIPAnnotationValue(eIP.Name)).ShouldNot(gomega.BeEmpty()) + assignedMarkStr, err = getEgressIPAnnotationValue(eIP.Name)() + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "failed to get egress IP mark from annotations") + assignedMarkAfterUpdate, err := strconv.Atoi(assignedMarkStr) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "failed to convert mark to integer") + gomega.Expect(assignedMark).Should(gomega.Equal(assignedMarkAfterUpdate), "Mark should be identical if annotation is cleared") + ginkgo.By("confirm cache is unchanged") + cachedMark, _, err := fakeClusterManagerOVN.eIPC.getOrAllocMark(eIP.Name) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + gomega.Expect(cachedMark).Should(gomega.Equal(assignedMark), "EIP annotation and cache mark integer must be the same") + ginkgo.By("deleted EgressIP removes cache entry") + err = fakeClusterManagerOVN.fakeClient.EgressIPClient.K8sV1().EgressIPs().Delete(context.TODO(), eIP.Name, metav1.DeleteOptions{}) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "Deletion of EIP must succeed") + // if reserve ID succeeds, then the previous entry was removed.
+ gomega.Eventually(func() error { + return fakeClusterManagerOVN.eIPC.markAllocator.ReserveID(eIP.Name, 9999) + }).Should(gomega.Succeed()) + return nil + } + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) }) ginkgo.Context("syncEgressIP for dual-stack", func() { @@ -3159,8 +3286,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { &egressipv1.EgressIPList{Items: []egressipv1.EgressIP{eIP}}, ) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 _, err := fakeClusterManagerOVN.eIPC.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3258,8 +3385,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { &egressipv1.EgressIPList{Items: []egressipv1.EgressIP{eIP}}, ) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 _, err := fakeClusterManagerOVN.eIPC.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3347,8 +3474,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { &egressipv1.EgressIPList{Items: []egressipv1.EgressIP{eIP}}, ) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 _, err := fakeClusterManagerOVN.eIPC.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3436,8 +3563,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { &egressipv1.EgressIPList{Items: []egressipv1.EgressIP{eIP}}, ) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 _, err := fakeClusterManagerOVN.eIPC.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3542,8 +3669,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { }, ) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2 _, err := fakeClusterManagerOVN.eIPC.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3650,8 +3777,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() { }, ) - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1 - fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2 + fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1 + 
+ fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2
_, err := fakeClusterManagerOVN.eIPC.WatchEgressIP()
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -3754,8 +3881,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() {
},
)
- fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1
- fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2
+ fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1
+ fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2
_, err := fakeClusterManagerOVN.eIPC.WatchEgressIP()
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -3842,8 +3969,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() {
&egressipv1.EgressIPList{Items: []egressipv1.EgressIP{eIP}},
)
- fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1
- fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2
+ fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1
+ fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2
_, err := fakeClusterManagerOVN.eIPC.WatchEgressIP()
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -3857,6 +3984,48 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() {
err := app.Run([]string{app.Name})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
})
+
+ ginkgo.It("should assign new marks and add existing marks to mark cache", func() {
+ app.Action = func(ctx *cli.Context) error {
+ eIPMark := util.EgressIPMarkBase + 100 // mark to be added to EIP obj before sync func is called
+ eIPMarkStr := fmt.Sprintf("%d", eIPMark)
+ objMeta := newEgressIPMeta(egressIPName)
+ objMeta.Annotations = createAnnotWithMark(eIPMark)
+ eIPWithMark := egressipv1.EgressIP{ObjectMeta: objMeta}
+ eIP2WithoutMark := egressipv1.EgressIP{ObjectMeta: newEgressIPMeta(egressIPName2)}
+ fakeClusterManagerOVN.start(&egressipv1.EgressIPList{Items: []egressipv1.EgressIP{eIPWithMark, eIP2WithoutMark}})
+ _, err := fakeClusterManagerOVN.eIPC.WatchEgressIP()
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ ginkgo.By("ensure existing mark value isn't altered")
+ gomega.Consistently(func() string {
+ mark, err := getEgressIPAnnotationValue(eIPWithMark.Name)()
+ gomega.Expect(err).Should(gomega.Succeed(), "failed to get mark from annotation")
+ return mark
+ }).Should(gomega.Equal(eIPMarkStr))
+ ginkgo.By("ensure mark is written to EgressIP annotations")
+ gomega.Consistently(func() string {
+ mark, err := getEgressIPAnnotationValue(eIP2WithoutMark.Name)()
+ gomega.Expect(err).Should(gomega.Succeed(), "failed to get mark from annotation")
+ return mark
+ }).ShouldNot(gomega.Equal(""))
+ ginkgo.By("ensure mark cache contains correct values")
+ eIP2MarkStr, err := getEgressIPAnnotationValue(eIP2WithoutMark.Name)()
+ gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+ eIP2Mark, err := strconv.Atoi(eIP2MarkStr)
+ gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+ // call getOrAllocMark with each egress IP name and ensure the returned mark matches the annotation on the EgressIP obj
+ eIPMarkFromCache, _, err := fakeClusterManagerOVN.eIPC.getOrAllocMark(egressIPName)
+ gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+ gomega.Expect(eIPMark).Should(gomega.Equal(eIPMarkFromCache))
+ eIP2MarkFromCache, _, err := fakeClusterManagerOVN.eIPC.getOrAllocMark(egressIPName2)
+ gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+ gomega.Expect(eIP2Mark).Should(gomega.Equal(eIP2MarkFromCache))
+ gomega.Expect(eIPMarkFromCache).ShouldNot(gomega.Equal(eIP2MarkFromCache))
+ return nil
+ }
+ err := app.Run([]string{app.Name})
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ })
})
ginkgo.Context("AddEgressIP for IPv4", func() {
@@ -3931,8 +4100,8 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() {
&egressipv1.EgressIPList{Items: []egressipv1.EgressIP{eIP1}},
)
- fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1
- fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2
+ fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1
+ fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2
_, err := fakeClusterManagerOVN.eIPC.WatchEgressIP()
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -4022,13 +4191,16 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() {
&egressipv1.EgressIPList{Items: []egressipv1.EgressIP{eIP1}},
)
- fakeClusterManagerOVN.eIPC.allocator.cache[egressNode1.name] = &egressNode1
- fakeClusterManagerOVN.eIPC.allocator.cache[egressNode2.name] = &egressNode2
+ fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode1.name] = &egressNode1
+ fakeClusterManagerOVN.eIPC.nodeAllocator.cache[egressNode2.name] = &egressNode2
_, err := fakeClusterManagerOVN.eIPC.WatchEgressIP()
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(1))
egressIPs, nodes := getEgressIPStatus(egressIPName)
+ // store the mark in order to compare it with the mark after an update and ensure it has not been altered
+ markBeforeUpdate, err := getEgressIPAnnotationValue(egressIPName)()
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(nodes[0]).To(gomega.Equal(egressNode2.name))
gomega.Expect(egressIPs[0]).To(gomega.Equal(egressIP))
eIPToUpdate, err := fakeClusterManagerOVN.fakeClient.EgressIPClient.K8sV1().EgressIPs().Get(context.TODO(), eIP1.Name, metav1.GetOptions{})
@@ -4049,6 +4221,12 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() {
gomega.Eventually(getEgressIP).Should(gomega.Equal(updateEgressIP))
_, nodes = getEgressIPStatus(egressIPName)
gomega.Expect(nodes[0]).To(gomega.Equal(egressNode2.name))
+ markAfterUpdate, err := getEgressIPAnnotationValue(egressIPName)()
+ gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+ gomega.Expect(markBeforeUpdate).Should(gomega.Equal(markAfterUpdate), "The mark should be consistent")
+ markInCache, _, err := fakeClusterManagerOVN.eIPC.getOrAllocMark(egressIPName)
+ gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+ gomega.Expect(markAfterUpdate).Should(gomega.Equal(fmt.Sprintf("%d", markInCache)))
return nil
}
@@ -4056,4 +4234,24 @@ var _ = ginkgo.Describe("OVN cluster-manager EgressIP Operations", func() {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
})
})
+
+ ginkgo.Context("EgressIP Mark cache", func() {
+ ginkgo.It("should round robin when mark range is exhausted", func() {
+ nodeAlloc := getEgressIPMarkAllocator()
+ ecc := &egressIPClusterController{markAllocator: nodeAlloc}
+ eipMarkMin = 50000
+ eipMarkMax = eipMarkMin + 50
+ defer func() {
+ eipMarkMax = util.EgressIPMarkMax
+ eipMarkMin = util.EgressIPMarkBase
+ }()
+ for i := 0; i < 50; i++ {
+ mark, _, err := ecc.getOrAllocMark(egressIPName)
+ gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+ gomega.Expect(mark).Should(gomega.BeNumerically("<=", eipMarkMax), "mark should be less than or equal to the allowable max")
than allowable max") + gomega.Expect(mark).Should(gomega.BeNumerically(">=", eipMarkMin), "mark should be greater or equal to allowable min") + ecc.deallocMark(egressIPName) + } + }) + }) }) diff --git a/go-controller/pkg/clustermanager/egressip_event_handler.go b/go-controller/pkg/clustermanager/egressip_event_handler.go index 8544b48323..88348b39bb 100644 --- a/go-controller/pkg/clustermanager/egressip_event_handler.go +++ b/go-controller/pkg/clustermanager/egressip_event_handler.go @@ -204,7 +204,7 @@ func (h *egressIPClusterControllerEventHandler) SyncFunc(objs []interface{}) err } else { switch h.objType { case factory.EgressIPType: - syncFunc = nil + syncFunc = h.eIPC.syncEgressIPMarkAllocator case factory.EgressNodeType: syncFunc = h.eIPC.initEgressNodeReachability case factory.CloudPrivateIPConfigType: diff --git a/go-controller/pkg/clustermanager/egressservice/egressservice_cluster.go b/go-controller/pkg/clustermanager/egressservice/egressservice_cluster.go index 0e84ba2758..6bb7536e95 100644 --- a/go-controller/pkg/clustermanager/egressservice/egressservice_cluster.go +++ b/go-controller/pkg/clustermanager/egressservice/egressservice_cluster.go @@ -61,10 +61,10 @@ type Controller struct { watchFactory *factory.WatchFactory egressServiceLister egressservicelisters.EgressServiceLister egressServiceSynced cache.InformerSynced - egressServiceQueue workqueue.RateLimitingInterface + egressServiceQueue workqueue.TypedRateLimitingInterface[string] servicesSynced cache.InformerSynced endpointSlicesSynced cache.InformerSynced - nodesQueue workqueue.RateLimitingInterface + nodesQueue workqueue.TypedRateLimitingInterface[string] nodesSynced cache.InformerSynced IsReachable func(nodeName string, mgmtIPs []net.IP, healthClient healthcheck.EgressIPHealthClient) bool // TODO: make a universal cache instead @@ -114,9 +114,9 @@ func NewController( esInformer := wf.EgressServiceInformer() c.egressServiceLister = esInformer.Lister() c.egressServiceSynced = esInformer.Informer().HasSynced - c.egressServiceQueue = workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(1*time.Second, 5*time.Second, 5), - "egressservices", + c.egressServiceQueue = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "egressservices"}, ) _, err := esInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ AddFunc: c.onEgressServiceAdd, @@ -151,9 +151,9 @@ func NewController( } c.nodesSynced = wf.NodeInformer().HasSynced - c.nodesQueue = workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(1*time.Second, 5*time.Second, 5), - "egressservicenodes", + c.nodesQueue = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "egressservicenodes"}, ) _, err = wf.NodeInformer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ AddFunc: c.onNodeAdd, @@ -443,7 +443,7 @@ func (c *Controller) processNextEgressServiceWorkItem(wg *sync.WaitGroup) bool { defer c.egressServiceQueue.Done(key) - err := c.syncEgressService(key.(string)) + err := c.syncEgressService(key) if err == nil { c.egressServiceQueue.Forget(key) return true diff --git a/go-controller/pkg/clustermanager/egressservice/egressservice_cluster_endpointslices.go 
index 8979b940c7..8ebda6f57b 100644
--- a/go-controller/pkg/clustermanager/egressservice/egressservice_cluster_endpointslices.go
+++ b/go-controller/pkg/clustermanager/egressservice/egressservice_cluster_endpointslices.go
@@ -61,7 +61,7 @@ func (c *Controller) onEndpointSliceDelete(obj interface{}) {
}
func (c *Controller) queueServiceForEndpointSlice(endpointSlice *discovery.EndpointSlice) {
- key, err := services.ServiceControllerKey(endpointSlice)
+ key, err := services.GetServiceKeyFromEndpointSliceForDefaultNetwork(endpointSlice)
if err != nil {
// Do not log EndpointSlices missing service labels as errors.
// Once the service label is eventually added, we will get this event
diff --git a/go-controller/pkg/clustermanager/egressservice/egressservice_cluster_node.go b/go-controller/pkg/clustermanager/egressservice/egressservice_cluster_node.go
index e3ab9dfb1e..08e878b705 100644
--- a/go-controller/pkg/clustermanager/egressservice/egressservice_cluster_node.go
+++ b/go-controller/pkg/clustermanager/egressservice/egressservice_cluster_node.go
@@ -138,7 +138,7 @@ func (c *Controller) processNextNodeWorkItem(wg *sync.WaitGroup) bool {
defer c.nodesQueue.Done(key)
- err := c.syncNode(key.(string))
+ err := c.syncNode(key)
if err == nil {
c.nodesQueue.Forget(key)
return true
diff --git a/go-controller/pkg/clustermanager/egressservice_cluster_test.go b/go-controller/pkg/clustermanager/egressservice_cluster_test.go
index d4ff0b3a5d..fbc012d14a 100644
--- a/go-controller/pkg/clustermanager/egressservice_cluster_test.go
+++ b/go-controller/pkg/clustermanager/egressservice_cluster_test.go
@@ -7,7 +7,7 @@ import (
"reflect"
"time"
- "github.com/onsi/ginkgo"
+ "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
egressserviceapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1"
diff --git a/go-controller/pkg/clustermanager/endpointslicemirror/endpointslice_mirror_controller.go b/go-controller/pkg/clustermanager/endpointslicemirror/endpointslice_mirror_controller.go
index eeca6530aa..7b0e17185c 100644
--- a/go-controller/pkg/clustermanager/endpointslicemirror/endpointslice_mirror_controller.go
+++ b/go-controller/pkg/clustermanager/endpointslicemirror/endpointslice_mirror_controller.go
@@ -7,7 +7,6 @@ import (
"sync"
"time"
- nadlister "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1"
v1 "k8s.io/api/discovery/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/validation"
@@ -23,19 +22,12 @@ import (
"k8s.io/klog/v2"
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory"
+ networkAttachDefController "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller"
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util"
)
-const (
- maxRetries = 10
- // LabelSourceEndpointSlice label key used in mirrored EndpointSlice
- // that has the value of the default EndpointSlice name
- LabelSourceEndpointSlice = "k8s.ovn.org/source-endpointslice"
- // LabelSourceEndpointSliceVersion label key used in mirrored EndpointSlice
- // that has the value of the last known default EndpointSlice ResourceVersion
- LabelSourceEndpointSliceVersion = "k8s.ovn.org/source-endpointslice-version"
-)
+const maxRetries = 10
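// Editor's sketch (not part of this change): the queue updates above follow
// client-go's generic workqueue migration. Keys become strings at compile time,
// so the key.(string) assertions in the workers go away. Assuming a client-go
// version that ships the Typed variants, the before/after shape is:
//
//	// before: untyped queue, every item needs a key.(string) assertion
//	q := workqueue.NewNamedRateLimitingQueue(
//		workqueue.NewItemFastSlowRateLimiter(1*time.Second, 5*time.Second, 5),
//		"egressservices")
//
//	// after: string-keyed queue, assertions removed at the call sites
//	q := workqueue.NewTypedRateLimitingQueueWithConfig(
//		workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5),
//		workqueue.TypedRateLimitingQueueConfig[string]{Name: "egressservices"})

// Controller represents the EndpointSlice mirror controller.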
// For namespaces that use a user-defined primary network, this controller mirrors the default EndpointSlices @@ -44,15 +36,14 @@ const ( type Controller struct { kubeClient kubernetes.Interface wg *sync.WaitGroup - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] name string endpointSliceLister discoverylisters.EndpointSliceLister endpointSlicesSynced cache.InformerSynced podLister corelisters.PodLister podsSynced cache.InformerSynced - nadLister nadlister.NetworkAttachmentDefinitionLister - nadsSynced cache.InformerSynced + nadController *networkAttachDefController.NetAttachDefinitionController cancel context.CancelFunc } @@ -62,7 +53,7 @@ type Controller struct { // For other EndpointSlices it returns an empty value. func (c *Controller) getDefaultEndpointSliceKey(endpointSlice *v1.EndpointSlice) string { if c.isManagedByController(endpointSlice) { - defaultEndpointSliceName, found := endpointSlice.Labels[LabelSourceEndpointSlice] + defaultEndpointSliceName, found := endpointSlice.Labels[types.LabelSourceEndpointSlice] if !found { utilruntime.HandleError(fmt.Errorf("couldn't determine the source EndpointSlice for %s", cache.MetaObjectToName(endpointSlice))) return "" @@ -120,26 +111,24 @@ func (c *Controller) onEndpointSliceAdd(obj interface{}) { func NewController( ovnClient *util.OVNClusterManagerClientset, - wf *factory.WatchFactory) (*Controller, error) { + wf *factory.WatchFactory, nadController *networkAttachDefController.NetAttachDefinitionController) (*Controller, error) { wg := &sync.WaitGroup{} c := &Controller{ - kubeClient: ovnClient.KubeClient, - wg: wg, - name: types.EndpointSliceMirrorControllerName, + kubeClient: ovnClient.KubeClient, + wg: wg, + name: types.EndpointSliceMirrorControllerName, + nadController: nadController, } - c.queue = workqueue.NewRateLimitingQueueWithConfig( - workqueue.NewItemFastSlowRateLimiter(1*time.Second, 5*time.Second, 5), - workqueue.RateLimitingQueueConfig{Name: c.name}, + c.queue = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[string]{Name: c.name}, ) c.podLister = wf.PodCoreInformer().Lister() c.podsSynced = wf.PodCoreInformer().Informer().HasSynced - c.nadLister = wf.NADInformer().Lister() - c.nadsSynced = wf.NADInformer().Informer().HasSynced - endpointSlicesInformer := wf.EndpointSliceCoreInformer() c.endpointSliceLister = endpointSlicesInformer.Lister() c.endpointSlicesSynced = endpointSlicesInformer.Informer().HasSynced @@ -161,11 +150,6 @@ func (c *Controller) Start(ctx context.Context, threadiness int) error { ctx, cancel := context.WithCancel(ctx) c.cancel = cancel klog.Infof("Starting the EndpointSlice mirror controller") - if !util.WaitForInformerCacheSyncWithTimeout(c.name, ctx.Done(), c.endpointSlicesSynced, c.podsSynced, - c.nadsSynced) { - return fmt.Errorf("timed out waiting for caches to sync") - } - klog.Infof("Repairing EndpointSlice mirrors") err := c.repair(ctx) if err != nil { @@ -225,7 +209,7 @@ func (c *Controller) processNextEgressServiceWorkItem(ctx context.Context, wg *s defer c.queue.Done(key) - err := c.syncDefaultEndpointSlice(ctx, key.(string)) + err := c.syncDefaultEndpointSlice(ctx, key) if err == nil { c.queue.Forget(key) return true @@ -260,7 +244,7 @@ func (c *Controller) syncDefaultEndpointSlice(ctx context.Context, key string) e return err } - namespacePrimaryNetwork, err := util.GetActiveNetworkForNamespace(namespace, c.nadLister) + 
namespacePrimaryNetwork, err := c.nadController.GetActiveNetworkForNamespace(namespace) if err != nil { return err } @@ -270,8 +254,8 @@ func (c *Controller) syncDefaultEndpointSlice(ctx context.Context, key string) e } mirrorEndpointSliceSelector := labels.Set(map[string]string{ - LabelSourceEndpointSlice: name, - v1.LabelManagedBy: c.name, + types.LabelSourceEndpointSlice: name, + v1.LabelManagedBy: c.name, }).AsSelectorPreValidated() klog.Infof("Processing %s/%s EndpointSlice in %q primary network", namespace, name, namespacePrimaryNetwork.GetNetworkName()) @@ -302,7 +286,7 @@ func (c *Controller) syncDefaultEndpointSlice(ctx context.Context, key string) e if defaultEndpointSlice == nil { if mirroredEndpointSlice != nil { - klog.Infof("The default EndpointSlice %s/%s no longer exists, removing the mirrored one: %s", namespace, mirroredEndpointSlice.Labels[LabelSourceEndpointSlice], cache.MetaObjectToName(mirroredEndpointSlice)) + klog.Infof("The default EndpointSlice %s/%s no longer exists, removing the mirrored one: %s", namespace, mirroredEndpointSlice.Labels[types.LabelSourceEndpointSlice], cache.MetaObjectToName(mirroredEndpointSlice)) return c.kubeClient.DiscoveryV1().EndpointSlices(namespace).Delete(ctx, mirroredEndpointSlice.Name, metav1.DeleteOptions{}) } klog.Infof("The default EndpointSlice %s/%s no longer exists", namespace, name) @@ -320,7 +304,7 @@ func (c *Controller) syncDefaultEndpointSlice(ctx context.Context, key string) e if mirroredEndpointSlice != nil { // nothing to do if we already reconciled this exact EndpointSlice - if mirroredResourceVersion, ok := mirroredEndpointSlice.Labels[LabelSourceEndpointSliceVersion]; ok { + if mirroredResourceVersion, ok := mirroredEndpointSlice.Labels[types.LabelSourceEndpointSliceVersion]; ok { if mirroredResourceVersion == defaultEndpointSlice.ResourceVersion { return nil } @@ -420,8 +404,8 @@ func (c *Controller) mirrorEndpointSlice(mirroredEndpointSlice, defaultEndpointS // set the custom labels, generateName and reset the endpoints currentMirror.Labels[v1.LabelManagedBy] = c.name - currentMirror.Labels[LabelSourceEndpointSlice] = defaultEndpointSlice.Name - currentMirror.Labels[LabelSourceEndpointSliceVersion] = defaultEndpointSlice.ResourceVersion + currentMirror.Labels[types.LabelSourceEndpointSlice] = defaultEndpointSlice.Name + currentMirror.Labels[types.LabelSourceEndpointSliceVersion] = defaultEndpointSlice.ResourceVersion currentMirror.Labels[types.LabelUserDefinedEndpointSliceNetwork] = network.GetNetworkName() currentMirror.Labels[types.LabelUserDefinedServiceName] = defaultEndpointSlice.Labels[v1.LabelServiceName] diff --git a/go-controller/pkg/clustermanager/endpointslicemirror/endpointslice_mirror_controller_suite_test.go b/go-controller/pkg/clustermanager/endpointslicemirror/endpointslice_mirror_controller_suite_test.go index 8894533003..029efed665 100644 --- a/go-controller/pkg/clustermanager/endpointslicemirror/endpointslice_mirror_controller_suite_test.go +++ b/go-controller/pkg/clustermanager/endpointslicemirror/endpointslice_mirror_controller_suite_test.go @@ -3,7 +3,7 @@ package endpointslicemirror import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) diff --git a/go-controller/pkg/clustermanager/endpointslicemirror/endpointslice_mirror_controller_test.go b/go-controller/pkg/clustermanager/endpointslicemirror/endpointslice_mirror_controller_test.go index d9ea2e8482..5f31e312ae 100644 --- a/go-controller/pkg/clustermanager/endpointslicemirror/endpointslice_mirror_controller_test.go +++ b/go-controller/pkg/clustermanager/endpointslicemirror/endpointslice_mirror_controller_test.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/urfave/cli/v2" v1 "k8s.io/api/core/v1" @@ -17,16 +17,20 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + nad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" + kubetest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" + fakenad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/nad" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func() { var ( - app *cli.App - controller *Controller - fakeClient *util.OVNClusterManagerClientset + app *cli.App + controller *Controller + fakeClient *util.OVNClusterManagerClientset + nadController *nad.NetAttachDefinitionController ) start := func(objects ...runtime.Object) { @@ -36,18 +40,24 @@ var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func( fakeClient = util.GetOVNClientset(objects...).GetClusterManagerClientset() wf, err := factory.NewClusterManagerWatchFactory(fakeClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - controller, err = NewController(fakeClient, wf) + nadController, err = nad.NewNetAttachDefinitionController("test", &fakenad.FakeNetworkControllerManager{}, wf, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + controller, err = NewController(fakeClient, wf, nadController) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = wf.Start() gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = nadController.Start() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = controller.Start(context.Background(), 1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } ginkgo.BeforeEach(func() { - config.PrepareTestConfig() + err := config.PrepareTestConfig() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) config.OVNKubernetesFeature.EnableInterconnect = true config.OVNKubernetesFeature.EnableMultiNetwork = true config.OVNKubernetesFeature.EnableNetworkSegmentation = true @@ -60,6 +70,9 @@ var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func( if controller != nil { controller.Stop() } + if nadController != nil { + nadController.Stop() + } }) ginkgo.Context("on startup repair", func() { @@ -70,7 +83,7 @@ var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func( ObjectMeta: metav1.ObjectMeta{ Name: "test-pod", Namespace: namespaceT.Name, - Annotations: map[string]string{util.OvnPodAnnotationName: `{"default":{"mac_address":"0a:58:0a:f4:02:03","ip_address":"10.244.2.3/24","role":"infrastructure-locked"},"testns/l3-network":{"mac_address":"0a:58:0a:80:02:04","ip_address":"10.128.2.4/24","role":"primary"}}`}, + Annotations: map[string]string{util.OvnPodAnnotationName: 
`{"default":{"mac_address":"0a:58:0a:f4:02:03","ip_address":"10.244.2.3/24","role":"infrastructure-locked"},"testns/l3-network":{"mac_address":"0a:58:0a:84:02:04","ip_address":"10.132.2.4/24","role":"primary"}}`}, }, Status: v1.PodStatus{Phase: v1.PodRunning}, } @@ -95,8 +108,8 @@ var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func( }, }, } - staleEndpointSlice := mirrorEndpointSlice(&defaultEndpointSlice, "l3-network") - staleEndpointSlice.Labels[LabelSourceEndpointSlice] = "non-existing-endpointslice" + staleEndpointSlice := kubetest.MirrorEndpointSlice(&defaultEndpointSlice, "l3-network", false) + staleEndpointSlice.Labels[types.LabelSourceEndpointSlice] = "non-existing-endpointslice" objs := []runtime.Object{ &v1.PodList{ @@ -119,9 +132,11 @@ var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func( start(objs...) + nad := testing.GenerateNAD("l3-network", "l3-network", namespaceT.Name, types.Layer3Topology, "10.132.2.0/16/24", types.NetworkRolePrimary) + _, err := fakeClient.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(namespaceT.Name).Create( context.TODO(), - testing.GenerateNAD("l3-network", "l3-network", namespaceT.Name, types.Layer3Topology, "10.128.2.0/16/24", types.NetworkRolePrimary), + nad, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -144,8 +159,8 @@ var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func( // new mirrored EndpointSlice should get created mirrorEndpointSliceSelector := labels.Set(map[string]string{ - LabelSourceEndpointSlice: defaultEndpointSlice.Name, - discovery.LabelManagedBy: types.EndpointSliceMirrorControllerName, + types.LabelSourceEndpointSlice: defaultEndpointSlice.Name, + discovery.LabelManagedBy: types.EndpointSliceMirrorControllerName, }).AsSelectorPreValidated() mirroredEndpointSlices, err = fakeClient.KubeClient.DiscoveryV1().EndpointSlices(namespaceT.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: mirrorEndpointSliceSelector.String()}) @@ -162,7 +177,7 @@ var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func( gomega.Expect(mirroredEndpointSlices.Items[0].Endpoints).To(gomega.HaveLen(1)) gomega.Expect(mirroredEndpointSlices.Items[0].Endpoints[0].Addresses).To(gomega.HaveLen(1)) // check if the Address is set to the primary IP - gomega.Expect(mirroredEndpointSlices.Items[0].Endpoints[0].Addresses[0]).To(gomega.BeEquivalentTo("10.128.2.4")) + gomega.Expect(mirroredEndpointSlices.Items[0].Endpoints[0].Addresses[0]).To(gomega.BeEquivalentTo("10.132.2.4")) return nil } @@ -181,7 +196,7 @@ var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func( ObjectMeta: metav1.ObjectMeta{ Name: "test-pod", Namespace: namespaceT.Name, - Annotations: map[string]string{util.OvnPodAnnotationName: `{"default":{"mac_address":"0a:58:0a:f4:02:03","ip_address":"10.244.2.3/24","role":"primary"},"testns/l3-network":{"mac_address":"0a:58:0a:80:02:04","ip_address":"10.128.2.4/24","role":"secondary}}`}, + Annotations: map[string]string{util.OvnPodAnnotationName: `{"default":{"mac_address":"0a:58:0a:f4:02:03","ip_address":"10.244.2.3/24","role":"primary"},"testns/l3-network":{"mac_address":"0a:58:0a:84:02:04","ip_address":"10.132.2.4/24","role":"secondary}}`}, }, Status: v1.PodStatus{Phase: v1.PodRunning}, } @@ -229,7 +244,7 @@ var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func( _, err := 
fakeClient.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(namespaceT.Name).Create( context.TODO(), - testing.GenerateNAD("l3-network", "l3-network", namespaceT.Name, types.Layer3Topology, "10.128.2.0/16/24", types.NetworkRoleSecondary), + testing.GenerateNAD("l3-network", "l3-network", namespaceT.Name, types.Layer3Topology, "10.132.2.0/16/24", types.NetworkRoleSecondary), metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -273,7 +288,7 @@ var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func( ObjectMeta: metav1.ObjectMeta{ Name: "test-pod", Namespace: namespaceT.Name, - Annotations: map[string]string{util.OvnPodAnnotationName: `{"default":{"mac_address":"0a:58:0a:f4:02:03","ip_address":"10.244.2.3/24","role":"infrastructure-locked"},"testns/l3-network":{"mac_address":"0a:58:0a:80:02:04","ip_address":"10.128.2.4/24","role":"primary"}}`}, + Annotations: map[string]string{util.OvnPodAnnotationName: `{"default":{"mac_address":"0a:58:0a:f4:02:03","ip_address":"10.244.2.3/24","role":"infrastructure-locked"},"testns/l3-network":{"mac_address":"0a:58:0a:84:02:04","ip_address":"10.132.2.4/24","role":"primary"}}`}, }, Status: v1.PodStatus{Phase: v1.PodRunning}, } @@ -299,7 +314,7 @@ var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func( }, }, } - mirroredEndpointSlice := mirrorEndpointSlice(&defaultEndpointSlice, "l3-network") + mirroredEndpointSlice := kubetest.MirrorEndpointSlice(&defaultEndpointSlice, "l3-network", false) objs := []runtime.Object{ &v1.PodList{ Items: []v1.Pod{ @@ -321,9 +336,10 @@ var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func( start(objs...) + nad := testing.GenerateNAD("l3-network", "l3-network", namespaceT.Name, types.Layer3Topology, "10.132.2.0/16/24", types.NetworkRolePrimary) _, err := fakeClient.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(namespaceT.Name).Create( context.TODO(), - testing.GenerateNAD("l3-network", "l3-network", namespaceT.Name, types.Layer3Topology, "10.128.2.0/16/24", types.NetworkRolePrimary), + nad, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -343,8 +359,8 @@ var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func( // mirrored EndpointSlices should exist mirrorEndpointSliceSelector := labels.Set(map[string]string{ - LabelSourceEndpointSlice: defaultEndpointSlice.Name, - discovery.LabelManagedBy: types.EndpointSliceMirrorControllerName, + types.LabelSourceEndpointSlice: defaultEndpointSlice.Name, + discovery.LabelManagedBy: types.EndpointSliceMirrorControllerName, }).AsSelectorPreValidated() mirroredEndpointSlices, err = fakeClient.KubeClient.DiscoveryV1().EndpointSlices(namespaceT.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: mirrorEndpointSliceSelector.String()}) @@ -360,14 +376,14 @@ var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func( return nil }).WithTimeout(5 * time.Second).ShouldNot(gomega.HaveOccurred()) gomega.Expect(mirroredEndpointSlices.Items[0].Endpoints[0].Addresses).To(gomega.HaveLen(1)) - gomega.Expect(mirroredEndpointSlices.Items[0].Endpoints[0].Addresses).To(gomega.BeEquivalentTo([]string{"10.128.2.4"})) + gomega.Expect(mirroredEndpointSlices.Items[0].Endpoints[0].Addresses).To(gomega.BeEquivalentTo([]string{"10.132.2.4"})) ginkgo.By("when the EndpointSlice changes the mirrored one gets updated") newPod := v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "test-pod-new", Namespace: 
namespaceT.Name, - Annotations: map[string]string{util.OvnPodAnnotationName: `{"default":{"mac_address":"0a:58:0a:f4:02:04","ip_address":"10.244.2.4/24","primary":false},"testns/l3-network":{"mac_address":"0a:58:0a:80:02:05","ip_address":"10.128.2.5/24","primary":true}}`}, + Annotations: map[string]string{util.OvnPodAnnotationName: `{"default":{"mac_address":"0a:58:0a:f4:02:04","ip_address":"10.244.2.4/24","primary":false},"testns/l3-network":{"mac_address":"0a:58:0a:84:02:05","ip_address":"10.132.2.5/24","primary":true}}`}, }, Status: v1.PodStatus{Phase: v1.PodRunning}, } @@ -400,8 +416,8 @@ var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func( } mirrorEndpointSliceSelector := labels.Set(map[string]string{ - LabelSourceEndpointSlice: defaultEndpointSlice.Name, - discovery.LabelManagedBy: types.EndpointSliceMirrorControllerName, + types.LabelSourceEndpointSlice: defaultEndpointSlice.Name, + discovery.LabelManagedBy: types.EndpointSliceMirrorControllerName, }).AsSelectorPreValidated() mirroredEndpointSlices, err = fakeClient.KubeClient.DiscoveryV1().EndpointSlices(namespaceT.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: mirrorEndpointSliceSelector.String()}) @@ -418,8 +434,8 @@ var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func( return nil }).WithTimeout(5 * time.Second).ShouldNot(gomega.HaveOccurred()) - gomega.Expect(mirroredEndpointSlices.Items[0].Endpoints[0].Addresses[0]).To(gomega.BeEquivalentTo("10.128.2.4")) - gomega.Expect(mirroredEndpointSlices.Items[0].Endpoints[1].Addresses[0]).To(gomega.BeEquivalentTo("10.128.2.5")) + gomega.Expect(mirroredEndpointSlices.Items[0].Endpoints[0].Addresses[0]).To(gomega.BeEquivalentTo("10.132.2.4")) + gomega.Expect(mirroredEndpointSlices.Items[0].Endpoints[1].Addresses[0]).To(gomega.BeEquivalentTo("10.132.2.5")) ginkgo.By("when the default EndpointSlice is removed the mirrored one follows") err = fakeClient.KubeClient.DiscoveryV1().EndpointSlices(newPod.Namespace).Delete(context.TODO(), defaultEndpointSlice.Name, metav1.DeleteOptions{}) @@ -427,8 +443,8 @@ var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func( gomega.Eventually(func() error { mirrorEndpointSliceSelector := labels.Set(map[string]string{ - LabelSourceEndpointSlice: defaultEndpointSlice.Name, - discovery.LabelManagedBy: types.EndpointSliceMirrorControllerName, + types.LabelSourceEndpointSlice: defaultEndpointSlice.Name, + discovery.LabelManagedBy: types.EndpointSliceMirrorControllerName, }).AsSelectorPreValidated() mirroredEndpointSlices, err = fakeClient.KubeClient.DiscoveryV1().EndpointSlices(namespaceT.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: mirrorEndpointSliceSelector.String()}) @@ -449,16 +465,3 @@ var _ = ginkgo.Describe("Cluster manager EndpointSlice mirror controller", func( }) }) - -func mirrorEndpointSlice(defaultEndpointSlice *discovery.EndpointSlice, network string) *discovery.EndpointSlice { - var mirror *discovery.EndpointSlice - - mirror = defaultEndpointSlice.DeepCopy() - mirror.Name = defaultEndpointSlice.Name + "-mirrored" - mirror.Labels[discovery.LabelManagedBy] = types.EndpointSliceMirrorControllerName - mirror.Labels[LabelSourceEndpointSlice] = defaultEndpointSlice.Name - mirror.Labels[types.LabelUserDefinedEndpointSliceNetwork] = network - mirror.Labels[types.LabelUserDefinedServiceName] = defaultEndpointSlice.Labels[discovery.LabelServiceName] - mirror.Endpoints = nil - return mirror -} diff --git 
a/go-controller/pkg/clustermanager/fake_cluster_manager_test.go b/go-controller/pkg/clustermanager/fake_cluster_manager_test.go index bfc07d7478..0d3bde22d2 100644 --- a/go-controller/pkg/clustermanager/fake_cluster_manager_test.go +++ b/go-controller/pkg/clustermanager/fake_cluster_manager_test.go @@ -16,6 +16,7 @@ import ( egresssvc "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1" egresssvcfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/fake" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + nad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/healthcheck" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "k8s.io/apimachinery/pkg/runtime" @@ -89,7 +90,8 @@ func (o *FakeClusterManager) init() { gomega.Expect(err).ToNot(gomega.HaveOccurred()) } if util.IsNetworkSegmentationSupportEnabled() { - o.epsMirror, err = endpointslicemirror.NewController(o.fakeClient, o.watcher) + nadController := &nad.NetAttachDefinitionController{} + o.epsMirror, err = endpointslicemirror.NewController(o.fakeClient, o.watcher, nadController) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = o.epsMirror.Start(context.TODO(), 1) @@ -98,14 +100,16 @@ func (o *FakeClusterManager) init() { } func (o *FakeClusterManager) shutdown() { - o.watcher.Shutdown() - if config.OVNKubernetesFeature.EnableEgressIP { + if o.watcher != nil { + o.watcher.Shutdown() + } + if config.OVNKubernetesFeature.EnableEgressIP && o.eIPC != nil { o.eIPC.Stop() } - if config.OVNKubernetesFeature.EnableEgressService { + if config.OVNKubernetesFeature.EnableEgressService && o.esvc != nil { o.esvc.Stop() } - if util.IsNetworkSegmentationSupportEnabled() { + if util.IsNetworkSegmentationSupportEnabled() && o.epsMirror != nil { o.epsMirror.Stop() } } diff --git a/go-controller/pkg/clustermanager/network_cluster_controller.go b/go-controller/pkg/clustermanager/network_cluster_controller.go index 0d244a8b72..0e06785e94 100644 --- a/go-controller/pkg/clustermanager/network_cluster_controller.go +++ b/go-controller/pkg/clustermanager/network_cluster_controller.go @@ -8,17 +8,15 @@ import ( "reflect" "sync" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" cache "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" "k8s.io/klog/v2" ipamclaimsapi "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" - nadlister "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" - + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/id" idallocator "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/id" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/ip/subnet" annotationalloc "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/pod" @@ -27,12 +25,15 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + networkAttachDefController "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/persistentips" objretry "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) +type NetworkStatusReporter func(networkName string, fieldManager string, condition *metav1.Condition, events ...*util.EventDetails) error + // networkClusterController is the cluster controller for the networks. An // instance of this struct is expected to be created for each network. A network // is identified by its name and its unique id. It handles events at a cluster @@ -56,20 +57,34 @@ type networkClusterController struct { // retry framework for persistent ip allocation ipamClaimHandler *factory.Handler retryIPAMClaims *objretry.RetryFramework - + // tunnelIDAllocator of tunnelIDs within the network + tunnelIDAllocator id.Allocator podAllocator *pod.PodAllocator nodeAllocator *node.NodeAllocator networkIDAllocator idallocator.NamedAllocator ipamClaimReconciler *persistentips.IPAMClaimReconciler subnetAllocator subnet.Allocator + nadController *networkAttachDefController.NetAttachDefinitionController + // event recorder used to post events to k8s recorder record.EventRecorder + statusReporter NetworkStatusReporter + + // nodeName: errMessage + nodeErrors map[string]string + nodeErrorsLock sync.Mutex + // Error condition only reports one of the failed nodes. + // To avoid changing that error report with every update, we store reported error node. + reportedErrorNode string + util.NetInfo } -func newNetworkClusterController(networkIDAllocator idallocator.NamedAllocator, netInfo util.NetInfo, ovnClient *util.OVNClusterManagerClientset, wf *factory.WatchFactory, recorder record.EventRecorder) *networkClusterController { +func newNetworkClusterController(networkIDAllocator idallocator.NamedAllocator, netInfo util.NetInfo, + ovnClient *util.OVNClusterManagerClientset, wf *factory.WatchFactory, recorder record.EventRecorder, + nadController *networkAttachDefController.NetAttachDefinitionController, errorReporter NetworkStatusReporter) *networkClusterController { kube := &kube.KubeOVN{ Kube: kube.Kube{ KClient: ovnClient.KubeClient, @@ -87,6 +102,10 @@ func newNetworkClusterController(networkIDAllocator idallocator.NamedAllocator, wg: wg, networkIDAllocator: networkIDAllocator, recorder: recorder, + nadController: nadController, + statusReporter: errorReporter, + nodeErrors: make(map[string]string), + nodeErrorsLock: sync.Mutex{}, } return ncc @@ -95,18 +114,15 @@ func newNetworkClusterController(networkIDAllocator idallocator.NamedAllocator, func newDefaultNetworkClusterController(netInfo util.NetInfo, ovnClient *util.OVNClusterManagerClientset, wf *factory.WatchFactory, recorder record.EventRecorder) *networkClusterController { // use an allocator that can only allocate a single network ID for the // defaiult network - networkIDAllocator, err := idallocator.NewIDAllocator(types.DefaultNetworkName, 1) - if err != nil { - panic(fmt.Errorf("could not build ID allocator for default network: %w", err)) - } + networkIDAllocator := idallocator.NewIDAllocator(types.DefaultNetworkName, 1) // Reserve the id 0 for the default network. 
- err = networkIDAllocator.ReserveID(types.DefaultNetworkName, defaultNetworkID)
+ err := networkIDAllocator.ReserveID(types.DefaultNetworkName, defaultNetworkID)
if err != nil {
panic(fmt.Errorf("could not reserve default network ID: %w", err))
}
namedIDAllocator := networkIDAllocator.ForName(types.DefaultNetworkName)
- return newNetworkClusterController(namedIDAllocator, netInfo, ovnClient, wf, recorder)
+ return newNetworkClusterController(namedIDAllocator, netInfo, ovnClient, wf, recorder, nil, nil)
}
func (ncc *networkClusterController) hasPodAllocation() bool {
@@ -140,21 +156,61 @@ func (ncc *networkClusterController) hasNodeAllocation() bool {
func (ncc *networkClusterController) allowPersistentIPs() bool {
return config.OVNKubernetesFeature.EnablePersistentIPs &&
- ncc.NetInfo.AllowsPersistentIPs() &&
util.DoesNetworkRequireIPAM(ncc.NetInfo) &&
- (ncc.NetInfo.TopologyType() == types.Layer2Topology || ncc.NetInfo.TopologyType() == types.LocalnetTopology)
+ util.AllowsPersistentIPs(ncc.NetInfo)
}
func (ncc *networkClusterController) init() error {
+ // report no errors on restart, then propagate any new errors from the started handlers
+ if err := ncc.resetStatus(); err != nil {
+ return fmt.Errorf("failed to reset network status: %w", err)
+ }
+
networkID, err := ncc.networkIDAllocator.AllocateID()
if err != nil {
return err
}
+ if util.DoesNetworkRequireTunnelIDs(ncc.NetInfo) {
+ ncc.tunnelIDAllocator = id.NewIDAllocator(ncc.GetNetworkName(), types.MaxLogicalPortTunnelKey)
+ // Reserve the id 0. We don't want to assign this id to any of the pods or nodes.
+ if err = ncc.tunnelIDAllocator.ReserveID("zero", util.NoID); err != nil {
+ return err
+ }
+ if util.IsNetworkSegmentationSupportEnabled() && ncc.IsPrimaryNetwork() {
+ // if the network is a primary L2 UDN network, then we need to reserve
+ // the IDs used by each node in this network's pod allocator
+ nodes, err := ncc.watchFactory.GetNodes()
+ if err != nil {
+ return fmt.Errorf("failed to list node objects: %w", err)
+ }
+ for _, node := range nodes {
+ tunnelID, err := util.ParseUDNLayer2NodeGRLRPTunnelIDs(node, ncc.GetNetworkName())
+ if err != nil {
+ if util.IsAnnotationNotSetError(err) {
+ klog.Warningf("tunnelID annotation does not exist for the node %s for network %s, err: %v; we need to allocate it...",
+ node.Name, ncc.GetNetworkName(), err)
+ } else {
+ return fmt.Errorf("failed to fetch tunnelID annotation from the node %s for network %s, err: %v",
+ node.Name, ncc.GetNetworkName(), err)
+ }
+ }
+ if tunnelID != util.InvalidID {
+ if err := ncc.tunnelIDAllocator.ReserveID(ncc.GetNetworkName()+"_"+node.Name, tunnelID); err != nil {
+ return fmt.Errorf("unable to reserve id for network %s, node %s: %w", ncc.GetNetworkName(), node.Name, err)
+ }
+ }
+ }
+ }
+ }
+
if ncc.hasNodeAllocation() {
ncc.retryNodes = ncc.newRetryFramework(factory.NodeType, true)
- ncc.nodeAllocator = node.NewNodeAllocator(networkID, ncc.NetInfo, ncc.watchFactory.NodeCoreInformer().Lister(), ncc.kube)
+ ncc.nodeAllocator = node.NewNodeAllocator(networkID, ncc.NetInfo, ncc.watchFactory.NodeCoreInformer().Lister(), ncc.kube, ncc.tunnelIDAllocator)
err := ncc.nodeAllocator.Init()
if err != nil {
return fmt.Errorf("failed to initialize host subnet ip allocator: %w", err)
}
@@ -172,7 +228,6 @@ func (ncc *networkClusterController) init() error {
var (
podAllocationAnnotator *annotationalloc.PodAnnotationAllocator
ipamClaimsReconciler persistentips.PersistentAllocations
- nadLister nadlister.NetworkAttachmentDefinitionLister
)
if ncc.allowPersistentIPs() {
@@ -191,11 +246,9 @@ func (ncc *networkClusterController) init() error {
ncc.kube,
ipamClaimsReconciler,
)
- if util.IsNetworkSegmentationSupportEnabled() {
- nadLister = ncc.watchFactory.NADInformer().Lister()
- }
+
ncc.podAllocator = pod.NewPodAllocator(ncc.NetInfo, podAllocationAnnotator, ipAllocator,
- ipamClaimsReconciler, nadLister, ncc.recorder)
+ ipamClaimsReconciler, ncc.nadController, ncc.recorder, ncc.tunnelIDAllocator)
if err := ncc.podAllocator.Init(); err != nil {
return fmt.Errorf("failed to initialize pod ip allocator: %w", err)
}
@@ -204,6 +257,96 @@
return nil
}
+// updateNetworkStatus reports a status for networkClusterController's network via a UDN status condition
+// of type "NetworkAllocationSucceeded", if the network was created by UDN.
+// When at least one node reports an error, the condition will be set to false and an event with the
+// node-specific error will be generated.
+// Call this function after handling every node event; set handlerErr to nil to report no error.
+// There are potential optimizations to when an error should be reported, see https://github.com/ovn-org/ovn-kubernetes/pull/4647#discussion_r1763352619.
+func (ncc *networkClusterController) updateNetworkStatus(nodeName string, handlerErr error) error {
+ if ncc.statusReporter == nil {
+ return nil
+ }
+ errorMsg := ""
+ if handlerErr != nil {
+ errorMsg = handlerErr.Error()
+ }
+
+ ncc.nodeErrorsLock.Lock()
+ defer ncc.nodeErrorsLock.Unlock()
+ if ncc.nodeErrors[nodeName] == errorMsg {
+ // error message didn't change for that node, no need to update
+ return nil
+ }
+
+ reportedErrorNode := ncc.reportedErrorNode
+ if ncc.reportedErrorNode == "" && errorMsg != "" {
+ reportedErrorNode = nodeName
+ }
+ if ncc.reportedErrorNode == nodeName && errorMsg == "" {
+ // error for this node is fixed, report the next node that still has an error
+ reportedErrorNode = ""
+ for errorNode := range ncc.nodeErrors {
+ if errorNode != nodeName && ncc.nodeErrors[errorNode] != "" {
+ reportedErrorNode = errorNode
+ break
+ }
+ }
+ }
+
+ var condition *metav1.Condition
+ if reportedErrorNode != ncc.reportedErrorNode {
+ // The condition only changes if ncc.reportedErrorNode changes.
+ // Otherwise, condition stays nil and the error message is only reflected in an event.
+ condition = getNetworkAllocationUDNCondition(reportedErrorNode)
+ }
+ events := make([]*util.EventDetails, 0, 1)
+ if errorMsg != "" {
+ events = append(events, &util.EventDetails{
+ EventType: util.EventTypeWarning,
+ Reason: "NetworkAllocationFailed",
+ Note: fmt.Sprintf("Error occurred for node %s: %s", nodeName, errorMsg),
+ })
+ }
+
+ netName := ncc.NetInfo.GetNetworkName()
+ if err := ncc.statusReporter(netName, "NetworkClusterController", condition, events...); err != nil {
+ return fmt.Errorf("failed to report network status: %w", err)
+ }
+ ncc.nodeErrors[nodeName] = errorMsg
+ ncc.reportedErrorNode = reportedErrorNode
+
+ return nil
+}
+
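// Editor's note (illustration only): the bookkeeping above rewrites the
// condition only when the identity of the reported node changes, while every
// new error message still surfaces as an event:
//
//	node A fails    -> condition False (reporting A), warning event for A
//	node B fails    -> condition unchanged (still A), warning event for B
//	node A recovers -> condition rewritten, still False (now reporting B)
//	node B recovers -> condition rewritten to True, no event
+
+// resetStatus should be called on startup before any handler is started to avoid a status race.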
+func (ncc *networkClusterController) resetStatus() error {
+ if ncc.statusReporter == nil {
+ return nil
+ }
+ netName := ncc.NetInfo.GetNetworkName()
+ return ncc.statusReporter(netName, "NetworkClusterController", getNetworkAllocationUDNCondition(""))
+}
+
+// We only report one failed node in the condition to avoid overly long messages and too many condition updates.
+// The node to be reported is passed as errorNode; if empty, all nodes are considered to have succeeded.
+func getNetworkAllocationUDNCondition(errorNode string) *metav1.Condition {
+ condition := &metav1.Condition{
+ Type: "NetworkAllocationSucceeded",
+ LastTransitionTime: metav1.Now(),
+ }
+ if errorNode == "" {
+ condition.Status = metav1.ConditionTrue
+ condition.Reason = "NetworkAllocationSucceeded"
+ condition.Message = "Network allocation succeeded for all synced nodes."
+ } else {
+ condition.Status = metav1.ConditionFalse
+ condition.Reason = "InternalError"
+ condition.Message = fmt.Sprintf("Network allocation failed for at least one node: %v, check UDN events for more info.", errorNode)
+ }
+ return condition
+}
+
// Start the network cluster controller. Depending on the cluster configuration
// and type of network, it does the following:
// - initializes the node allocator and starts listening to node events
@@ -324,12 +467,17 @@ func (h *networkClusterControllerEventHandler) AddResource(obj interface{}, from
if !ok {
return fmt.Errorf("could not cast %T object to *corev1.Node", obj)
}
- if err = h.ncc.nodeAllocator.HandleAddUpdateNodeEvent(node); err != nil {
+ err = h.ncc.nodeAllocator.HandleAddUpdateNodeEvent(node)
+ if err == nil {
+ h.clearInitialNodeNetworkUnavailableCondition(node)
+ }
+ statusErr := h.ncc.updateNetworkStatus(node.Name, err)
+ joinedErr := errors.Join(err, statusErr)
+ if joinedErr != nil {
klog.Infof("Node add failed for %s, will try again later: %v",
- node.Name, err)
- return err
+ node.Name, joinedErr)
+ return joinedErr
}
- h.clearInitialNodeNetworkUnavailableCondition(node)
case factory.IPAMClaimsType:
return nil
default:
@@ -363,12 +511,17 @@ func (h *networkClusterControllerEventHandler) UpdateResource(oldObj, newObj int
if !ok {
return fmt.Errorf("could not cast %T object to *corev1.Node", newObj)
}
- if err = h.ncc.nodeAllocator.HandleAddUpdateNodeEvent(node); err != nil {
+ err = h.ncc.nodeAllocator.HandleAddUpdateNodeEvent(node)
+ if err == nil {
+ h.clearInitialNodeNetworkUnavailableCondition(node)
+ }
+ statusErr := h.ncc.updateNetworkStatus(node.Name, err)
+ joinedErr := errors.Join(err, statusErr)
+ if joinedErr != nil {
klog.Infof("Node update failed for %s, will try again later: %v",
- node.Name, err)
- return err
+ node.Name, joinedErr)
+ return joinedErr
}
- h.clearInitialNodeNetworkUnavailableCondition(node)
case factory.IPAMClaimsType:
return nil
default:
@@ -395,7 +548,9 @@ func (h *networkClusterControllerEventHandler) DeleteResource(obj, cachedObj int
if !ok {
return fmt.Errorf("could not cast obj of type %T to *knet.Node", obj)
}
- return h.ncc.nodeAllocator.HandleDeleteNode(node)
+ err := h.ncc.nodeAllocator.HandleDeleteNode(node)
+ statusErr := h.ncc.updateNetworkStatus(node.Name, err)
+ return errors.Join(err, statusErr)
case factory.IPAMClaimsType:
ipamClaim, ok := obj.(*ipamclaimsapi.IPAMClaim)
if !ok {
diff --git a/go-controller/pkg/clustermanager/network_cluster_controller_test.go b/go-controller/pkg/clustermanager/network_cluster_controller_test.go
index 0fdafc556f..19da02d62b 100644
--- a/go-controller/pkg/clustermanager/network_cluster_controller_test.go
+++ b/go-controller/pkg/clustermanager/network_cluster_controller_test.go
@@ -5,7 +5,7 @@ import (
"net"
"sync"
- "github.com/onsi/ginkgo"
+ "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"github.com/urfave/cli/v2"
v1 "k8s.io/api/core/v1"
diff --git a/go-controller/pkg/clustermanager/node/node_allocator.go b/go-controller/pkg/clustermanager/node/node_allocator.go
index c1277c59fa..8629098812 100644
--- a/go-controller/pkg/clustermanager/node/node_allocator.go
+++ b/go-controller/pkg/clustermanager/node/node_allocator.go
@@ -15,6 +15,7 @@ import (
"github.com/google/go-cmp/cmp/cmpopts"
hotypes "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/types"
houtil "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/util"
+ "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/id"
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
ipgenerator "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/ip"
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube"
@@ -33,7 +34,8 @@ import (
type NodeAllocator struct {
kube kube.Interface
nodeLister listers.NodeLister
-
+ // idAllocator allocates tunnel IDs within the network
+ idAllocator id.Allocator
clusterSubnetAllocator SubnetAllocator
hybridOverlaySubnetAllocator SubnetAllocator
// node gateway router port IP generators (connecting to the join switch)
@@ -46,7 +48,7 @@ type NodeAllocator struct {
netInfo util.NetInfo
}
-func NewNodeAllocator(networkID int, netInfo util.NetInfo, nodeLister listers.NodeLister, kube kube.Interface) *NodeAllocator {
+func NewNodeAllocator(networkID int, netInfo util.NetInfo, nodeLister listers.NodeLister, kube kube.Interface, tunnelIDAllocator id.Allocator) *NodeAllocator {
na := &NodeAllocator{
kube: kube,
nodeLister: nodeLister,
@@ -54,6 +56,7 @@ func NewNodeAllocator(networkID int, netInfo util.NetInfo, nodeLister listers.No
netInfo: netInfo,
clusterSubnetAllocator: NewSubnetAllocator(),
hybridOverlaySubnetAllocator: NewSubnetAllocator(),
+ idAllocator: tunnelIDAllocator,
}
if na.hasNodeSubnetAllocation() {
@@ -276,14 +279,38 @@ func (na *NodeAllocator) syncNodeNetworkAnnotations(node *corev1.Node) error {
updatedSubnetsMap[networkName] = validExistingSubnets
}
}
+ newTunnelID := util.NoID
+ if util.IsNetworkSegmentationSupportEnabled() && na.netInfo.IsPrimaryNetwork() && util.DoesNetworkRequireTunnelIDs(na.netInfo) {
+ existingTunnelID, err := util.ParseUDNLayer2NodeGRLRPTunnelIDs(node, networkName)
+ if err != nil && !util.IsAnnotationNotSetError(err) {
+ return fmt.Errorf("failed to fetch tunnelID annotation from the node %s for network %s, err: %v",
+ node.Name, networkName, err)
+ }
+ if existingTunnelID == util.InvalidID {
+ if newTunnelID, err = na.idAllocator.AllocateID(networkName + "_" + node.Name); err != nil {
+ return fmt.Errorf("failed to assign node %s tunnel id for network %s: %w", node.Name, networkName, err)
+ }
+ // This log should be printed only once per network and node, at startup
+ klog.V(4).Infof("Allocating node %s tunnelID %d for network %s", node.Name, newTunnelID, networkName)
+ } else {
+ // calling ReserveID on an already reserved ID with the same key is a no-op, so this is safe
+ if err = na.idAllocator.ReserveID(networkName+"_"+node.Name, existingTunnelID); err != nil {
+ return fmt.Errorf("failed to reserve node %s tunnel id for network %s: %w", node.Name, networkName, err)
+ }
+ }
+ }
// Also update the node annotation if the networkID doesn't match
- if len(updatedSubnetsMap) > 0 || na.networkID != networkID || len(allocatedJoinSubnets) > 0 {
- err = na.updateNodeNetworkAnnotationsWithRetry(node.Name, updatedSubnetsMap, na.networkID, allocatedJoinSubnets)
+ if len(updatedSubnetsMap) > 0 || na.networkID != networkID || len(allocatedJoinSubnets) > 0 || newTunnelID != util.NoID {
+ err = na.updateNodeNetworkAnnotationsWithRetry(node.Name, updatedSubnetsMap, na.networkID, newTunnelID, allocatedJoinSubnets)
if err != nil {
if errR := na.clusterSubnetAllocator.ReleaseNetworks(node.Name, allocatedSubnets...); errR != nil {
klog.Warningf("Error releasing node %s subnets: %v", node.Name, errR)
}
+ if util.IsNetworkSegmentationSupportEnabled() && na.netInfo.IsPrimaryNetwork() && util.DoesNetworkRequireTunnelIDs(na.netInfo) {
+ na.idAllocator.ReleaseID(networkName + "_" + node.Name)
+ klog.Infof("Releasing node %s tunnelID for network %s since annotation update failed", node.Name, networkName)
+ }
return err
}
}
@@ -351,7 +378,7 @@ func (na *NodeAllocator) Sync(nodes []interface{}) error {
}
// updateNodeNetworkAnnotationsWithRetry will update the node's subnet, network id, and tunnel id annotations
-func (na *NodeAllocator) updateNodeNetworkAnnotationsWithRetry(nodeName string, hostSubnetsMap map[string][]*net.IPNet, networkId int, joinAddr []*net.IPNet) error {
+func (na *NodeAllocator) updateNodeNetworkAnnotationsWithRetry(nodeName string, hostSubnetsMap map[string][]*net.IPNet, networkId, tunnelID int, joinAddr []*net.IPNet) error {
// Retry if it fails because of potential conflict which is transient. Return error in the
// case of other errors (say temporary API server down), and it will be taken care of by the
// retry mechanism.
@@ -366,8 +393,8 @@ func (na *NodeAllocator) updateNodeNetworkAnnotationsWithRetry(nodeName string,
for netName, hostSubnets := range hostSubnetsMap {
cnode.Annotations, err = util.UpdateNodeHostSubnetAnnotation(cnode.Annotations, hostSubnets, netName)
if err != nil {
- return fmt.Errorf("failed to update node %q annotation subnet %s",
- node.Name, util.JoinIPNets(hostSubnets, ","))
+ return fmt.Errorf("failed to update node %q annotation subnet %s: %w",
+ node.Name, util.JoinIPNets(hostSubnets, ","), err)
}
}
@@ -375,21 +402,27 @@
cnode.Annotations, err = util.UpdateNodeGatewayRouterLRPAddrsAnnotation(cnode.Annotations, joinAddr, networkName)
if err != nil {
- return fmt.Errorf("failed to update node %q annotation LRPAddrAnnotation %s",
- node.Name, util.JoinIPNets(joinAddr, ","))
+ return fmt.Errorf("failed to update node %q annotation LRPAddrAnnotation %s: %w",
+ node.Name, util.JoinIPNets(joinAddr, ","), err)
}
-
cnode.Annotations, err = util.UpdateNetworkIDAnnotation(cnode.Annotations, networkName, networkId)
if err != nil {
- return fmt.Errorf("failed to update node %q network id annotation %d for network %s",
- node.Name, networkId, networkName)
+ return fmt.Errorf("failed to update node %q network id annotation %d for network %s: %w",
+ node.Name, networkId, networkName, err)
+ }
+ if tunnelID != util.NoID {
+ cnode.Annotations, err = util.UpdateUDNLayer2NodeGRLRPTunnelIDs(cnode.Annotations, networkName, tunnelID)
+ if err != nil {
+ return fmt.Errorf("failed to update node %q tunnel id annotation %d for network %s: %w",
+ node.Name, tunnelID, networkName, err)
+ }
}
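// Editor's sketch (not part of this change): both the cluster controller's init
// and the node allocator above use the same reserve-or-allocate idiom for tunnel
// IDs, keyed by "<network>_<node>". Reduced to its core (reserveOrAllocate is a
// hypothetical helper; id.Allocator, util.InvalidID, and the ReserveID no-op
// behavior are taken from this repo):
//
//	func reserveOrAllocate(a id.Allocator, key string, existing int) (int, error) {
//		if existing != util.InvalidID {
//			// reserving an already reserved ID under the same key is a no-op
//			return existing, a.ReserveID(key, existing)
//		}
//		return a.AllocateID(key)
//	}

// It is possible to update the node annotations using status subresource
// because changes to metadata via status subresource are not restricted for nodes.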
return na.kube.UpdateNodeStatus(cnode) }) if resultErr != nil { - return fmt.Errorf("failed to update node %s annotation", nodeName) + return fmt.Errorf("failed to update node %s annotation: %w", nodeName, resultErr) } return nil } @@ -413,8 +446,8 @@ func (na *NodeAllocator) Cleanup() error { } hostSubnetsMap := map[string][]*net.IPNet{networkName: nil} - // passing util.InvalidNetworkID deletes the network id annotation for the network. - err = na.updateNodeNetworkAnnotationsWithRetry(node.Name, hostSubnetsMap, util.InvalidNetworkID, nil) + // passing util.InvalidID deletes the network/tunnel id annotation for the network. + err = na.updateNodeNetworkAnnotationsWithRetry(node.Name, hostSubnetsMap, util.InvalidID, util.InvalidID, nil) if err != nil { return fmt.Errorf("failed to clear node %q subnet annotation for network %s", node.Name, networkName) diff --git a/go-controller/pkg/clustermanager/pod/allocator.go b/go-controller/pkg/clustermanager/pod/allocator.go index 36f06ba7f1..24b9591aff 100644 --- a/go-controller/pkg/clustermanager/pod/allocator.go +++ b/go-controller/pkg/clustermanager/pod/allocator.go @@ -14,11 +14,11 @@ import ( nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" - nadlister "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/id" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/ip/subnet" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/pod" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + nad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/persistentips" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -41,7 +41,7 @@ type PodAllocator struct { ipamClaimsReconciler persistentips.PersistentAllocations - nadLister nadlister.NetworkAttachmentDefinitionLister + nadController nad.NADController // event recorder used to post events to k8s recorder record.EventRecorder @@ -58,16 +58,18 @@ func NewPodAllocator( podAnnotationAllocator *pod.PodAnnotationAllocator, ipAllocator subnet.Allocator, claimsReconciler persistentips.PersistentAllocations, - nadLister nadlister.NetworkAttachmentDefinitionLister, + nadController nad.NADController, recorder record.EventRecorder, + idAllocator id.Allocator, ) *PodAllocator { podAllocator := &PodAllocator{ netInfo: netInfo, releasedPods: map[string]sets.Set[string]{}, releasedPodsMutex: sync.Mutex{}, podAnnotationAllocator: podAnnotationAllocator, - nadLister: nadLister, + nadController: nadController, recorder: recorder, + idAllocator: idAllocator, } // this network might not have IPAM, we will just allocate MAC addresses @@ -81,21 +83,8 @@ func NewPodAllocator( return podAllocator } -// Init initializes the allocator with as configured for the network +// Init checks if persistentIPs controller elements are correctly configured for the network func (a *PodAllocator) Init() error { - var err error - if util.DoesNetworkRequireTunnelIDs(a.netInfo) { - a.idAllocator, err = id.NewIDAllocator(a.netInfo.GetNetworkName(), types.MaxLogicalPortTunnelKey) - if err != nil { - return err - } - // Reserve the id 0. We don't want to assign this id to any of the pods. 
- err = a.idAllocator.ReserveID("zero", 0) - if err != nil { - return err - } - } - if a.netInfo.AllowsPersistentIPs() && a.ipamClaimsReconciler == nil { return fmt.Errorf( "network %q allows persistent IPs but missing the claims reconciler", @@ -107,11 +96,11 @@ func (a *PodAllocator) Init() error { } // getActiveNetworkForNamespace returns the active network for the given pod's namespace -// and is a wrapper around util.GetActiveNetworkForNamespace +// and is a wrapper around GetActiveNetworkForNamespace func (a *PodAllocator) getActiveNetworkForPod(pod *corev1.Pod) (util.NetInfo, error) { - activeNetwork, err := util.GetActiveNetworkForNamespace(pod.Namespace, a.nadLister) + activeNetwork, err := a.nadController.GetActiveNetworkForNamespace(pod.Namespace) if err != nil { - if util.IsUnknownActiveNetworkError(err) { + if util.IsUnprocessedActiveNetworkError(err) { a.recordPodErrorEvent(pod, err) } return nil, err diff --git a/go-controller/pkg/clustermanager/pod/allocator_test.go b/go-controller/pkg/clustermanager/pod/allocator_test.go index e244f1af6e..14a1a947f2 100644 --- a/go-controller/pkg/clustermanager/pod/allocator_test.go +++ b/go-controller/pkg/clustermanager/pod/allocator_test.go @@ -18,6 +18,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/persistentips" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/nad" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -29,14 +30,12 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" apitypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" kubemocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube/mocks" - v1nadmocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/mocks/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" v1mocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/mocks/k8s.io/client-go/listers/core/v1" ) @@ -99,7 +98,7 @@ func (a *ipAllocatorStub) AllocateUntilFull(name string) error { panic("not implemented") // TODO: Implement } -func (a *ipAllocatorStub) AllocateIPs(name string, ips []*net.IPNet) error { +func (a *ipAllocatorStub) AllocateIPPerSubnet(name string, ips []*net.IPNet) error { panic("not implemented") // TODO: Implement } @@ -513,22 +512,6 @@ func TestPodAllocator_reconcileForNAD(t *testing.T) { expectError: "failed to get NAD to network mapping: unexpected primary network \"\" specified with a NetworkSelectionElement &{Name:nad Namespace:namespace IPRequest:[] MacRequest: InfinibandGUIDRequest: InterfaceRequest: PortMappingsRequest:[] BandwidthRequest: CNIArgs: GatewayRequest:[] IPAMClaimReference:}", expectEvents: []string{"Warning ErrorAllocatingPod unexpected primary network \"\" specified with a NetworkSelectionElement &{Name:nad Namespace:namespace IPRequest:[] MacRequest: InfinibandGUIDRequest: InterfaceRequest: PortMappingsRequest:[] BandwidthRequest: CNIArgs: GatewayRequest:[] IPAMClaimReference:}"}, }, - { - name: "Pod on a namespace with multiple primary networks; expect event and error", - args: args{ - new: &testPod{ - scheduled: true, - }, - nads: []*nadapi.NetworkAttachmentDefinition{ - ovntest.GenerateNAD("surya", "miguel", 
"namespace", - types.Layer3Topology, "100.128.0.0/16", types.NetworkRolePrimary), - ovntest.GenerateNAD("surya", "miguel", "namespace", - types.Layer2Topology, "10.100.200.0/24", types.NetworkRolePrimary), - }, - }, - expectError: "failed looking for an active network: unable to determine what is the primary role network for namespace 'namespace'; please remove multiple primary role networkNADs from it", - expectEvents: []string{"Warning ErrorAllocatingPod unable to determine what is the primary role network for namespace 'namespace'; please remove multiple primary role networkNADs from it"}, - }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -569,6 +552,8 @@ func TestPodAllocator_reconcileForNAD(t *testing.T) { config.OVNKubernetesFeature.EnableInterconnect = tt.idAllocation + // config.IPv4Mode needs to be set so that the ipv4 of the userdefined primary networks can match the running cluster + config.IPv4Mode = true netInfo, err := util.NewNetInfo(netConf) if err != nil { t.Fatalf("Invalid netConf") @@ -594,16 +579,20 @@ func TestPodAllocator_reconcileForNAD(t *testing.T) { ipamClaimsReconciler, ) - nadLister := v1nadmocks.NetworkAttachmentDefinitionLister{} - nadNamespaceLister := v1nadmocks.NetworkAttachmentDefinitionNamespaceLister{} - nadLister.On("NetworkAttachmentDefinitions", "namespace").Return(&nadNamespaceLister) - mockedNADs := []*nadapi.NetworkAttachmentDefinition{} + testNs := "namespace" + nadNetworks := map[string]util.NetInfo{} for _, nad := range tt.args.nads { - if nad.Namespace == "namespace" { - mockedNADs = append(mockedNADs, nad) + if nad.Namespace == testNs { + nadNetwork, _ := util.ParseNADInfo(nad) + if nadNetwork.IsPrimaryNetwork() { + if _, ok := nadNetworks[testNs]; !ok { + nadNetworks[testNs] = nadNetwork + } + } } } - nadNamespaceLister.On("List", labels.Everything()).Return(mockedNADs, nil) + + nadController := &nad.FakeNADController{PrimaryNetworks: nadNetworks} fakeRecorder := record.NewFakeRecorder(10) @@ -619,7 +608,7 @@ func TestPodAllocator_reconcileForNAD(t *testing.T) { releasedPodsMutex: sync.Mutex{}, ipamClaimsReconciler: ipamClaimsReconciler, recorder: fakeRecorder, - nadLister: &nadLister, + nadController: nadController, } var old, new *corev1.Pod diff --git a/go-controller/pkg/clustermanager/secondary_network_cluster_manager.go b/go-controller/pkg/clustermanager/secondary_network_cluster_manager.go index 9dc5ddd6e5..36201b2df9 100644 --- a/go-controller/pkg/clustermanager/secondary_network_cluster_manager.go +++ b/go-controller/pkg/clustermanager/secondary_network_cluster_manager.go @@ -34,15 +34,14 @@ type secondaryNetworkClusterManager struct { // event recorder used to post events to k8s recorder record.EventRecorder + + errorReporter NetworkStatusReporter } -func newSecondaryNetworkClusterManager(ovnClient *util.OVNClusterManagerClientset, wf *factory.WatchFactory, recorder record.EventRecorder) (*secondaryNetworkClusterManager, error) { +func newSecondaryNetworkClusterManager(ovnClient *util.OVNClusterManagerClientset, wf *factory.WatchFactory, + recorder record.EventRecorder) (*secondaryNetworkClusterManager, error) { klog.Infof("Creating secondary network cluster manager") - networkIDAllocator, err := id.NewIDAllocator("NetworkIDs", maxSecondaryNetworkIDs) - if err != nil { - return nil, fmt.Errorf("failed to create an IdAllocator for the secondary network ids, err: %v", err) - } - + networkIDAllocator := id.NewIDAllocator("NetworkIDs", maxSecondaryNetworkIDs) // Reserve the id 0 for the default network. 
if err := networkIDAllocator.ReserveID(ovntypes.DefaultNetworkName, defaultNetworkID); err != nil { return nil, fmt.Errorf("idAllocator failed to reserve defaultNetworkID %d", defaultNetworkID) @@ -53,14 +52,18 @@ func newSecondaryNetworkClusterManager(ovnClient *util.OVNClusterManagerClientse networkIDAllocator: networkIDAllocator, recorder: recorder, } - - sncm.nadController, err = nad.NewNetAttachDefinitionController("cluster-manager", sncm, wf) + var err error + sncm.nadController, err = nad.NewNetAttachDefinitionController("cluster-manager", sncm, wf, recorder) if err != nil { return nil, err } return sncm, nil } +func (sncm *secondaryNetworkClusterManager) SetNetworkStatusReporter(errorReporter NetworkStatusReporter) { + sncm.errorReporter = errorReporter +} + // Start the secondary network controller, handles all events and creates all // needed logical entities func (sncm *secondaryNetworkClusterManager) Start() error { @@ -114,7 +117,8 @@ func (sncm *secondaryNetworkClusterManager) NewNetworkController(nInfo util.NetI klog.Infof("Creating new network controller for network %s of topology %s", nInfo.GetNetworkName(), nInfo.TopologyType()) namedIDAllocator := sncm.networkIDAllocator.ForName(nInfo.GetNetworkName()) - sncc := newNetworkClusterController(namedIDAllocator, nInfo, sncm.ovnClient, sncm.watchFactory, sncm.recorder) + sncc := newNetworkClusterController(namedIDAllocator, nInfo, sncm.ovnClient, sncm.watchFactory, sncm.recorder, + sncm.nadController, sncm.errorReporter) return sncc, nil } @@ -194,7 +198,8 @@ func (sncm *secondaryNetworkClusterManager) CleanupDeletedNetworks(validNetworks func (sncm *secondaryNetworkClusterManager) newDummyLayer3NetworkController(netName string) (nad.NetworkController, error) { netInfo, _ := util.NewNetInfo(&ovncnitypes.NetConf{NetConf: types.NetConf{Name: netName}, Topology: ovntypes.Layer3Topology}) namedIDAllocator := sncm.networkIDAllocator.ForName(netInfo.GetNetworkName()) - nc := newNetworkClusterController(namedIDAllocator, netInfo, sncm.ovnClient, sncm.watchFactory, sncm.recorder) + nc := newNetworkClusterController(namedIDAllocator, netInfo, sncm.ovnClient, sncm.watchFactory, sncm.recorder, + sncm.nadController, nil) err := nc.init() return nc, err } diff --git a/go-controller/pkg/clustermanager/secondary_network_unit_test.go b/go-controller/pkg/clustermanager/secondary_network_unit_test.go index 2e2628d56c..f1c98c643b 100644 --- a/go-controller/pkg/clustermanager/secondary_network_unit_test.go +++ b/go-controller/pkg/clustermanager/secondary_network_unit_test.go @@ -8,7 +8,7 @@ import ( "sync" "github.com/containernetworking/cni/pkg/types" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/urfave/cli/v2" v1 "k8s.io/api/core/v1" @@ -114,6 +114,7 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { ) ginkgo.BeforeEach(func() { + fakeClient = &util.OVNClusterManagerClientset{ KubeClient: fake.NewSimpleClientset(&v1.NodeList{Items: nodes()}), IPAMClaimsClient: fakeipamclaimclient.NewSimpleClientset(), @@ -191,6 +192,8 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { sncm.ovnClient, sncm.watchFactory, sncm.recorder, + sncm.nadController, + nil, ) gomega.Expect(nc.init()).To(gomega.Succeed()) gomega.Expect(nc.Start(ctx.Context)).To(gomega.Succeed()) @@ -207,6 +210,88 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { }) }) + ginkgo.When("Attaching to a localnet network", func() { + const subnets = "192.168.200.0/24,fd12:1234::0/64" + + var ( + 
fakeClient *util.OVNClusterManagerClientset + netInfo util.NetInfo + ) + + ginkgo.BeforeEach(func() { + fakeClient = &util.OVNClusterManagerClientset{ + KubeClient: fake.NewSimpleClientset(&v1.NodeList{Items: nodes()}), + NetworkAttchDefClient: fakenadclient.NewSimpleClientset(), + } + + gomega.Expect(config.PrepareTestConfig()).To(gomega.Succeed()) + }) + + ginkgo.DescribeTable( + "the secondary network controller", + func(netConf *ovncnitypes.NetConf, featureConfig config.OVNKubernetesFeatureConfig, expectedError error) { + var err error + netInfo, err = util.NewNetInfo(netConf) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + app.Action = func(ctx *cli.Context) error { + gomega.Expect(initConfig(ctx, featureConfig)).To(gomega.Succeed()) + + f, err = factory.NewClusterManagerWatchFactory(fakeClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = f.Start() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + sncm, err := newSecondaryNetworkClusterManager(fakeClient, f, recorder) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + _, err = sncm.NewNetworkController(netInfo) + if expectedError == nil { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + gomega.Expect(err).To(gomega.MatchError(expectedError)) + } + + return nil + } + + gomega.Expect(app.Run([]string{app.Name})).To(gomega.Succeed()) + }, + ginkgo.Entry( + "does not manage localnet topologies on IC deployments for networks without subnets", + &ovncnitypes.NetConf{NetConf: types.NetConf{Name: "blue"}, Topology: ovntypes.LocalnetTopology}, + config.OVNKubernetesFeatureConfig{EnableInterconnect: true, EnableMultiNetwork: true}, + nad.ErrNetworkControllerTopologyNotManaged, + ), + ginkgo.Entry( + "manages localnet topologies on IC deployments for networks with subnets", + &ovncnitypes.NetConf{ + NetConf: types.NetConf{Name: "blue"}, + Topology: ovntypes.LocalnetTopology, + Subnets: subnets, + }, + config.OVNKubernetesFeatureConfig{EnableInterconnect: true, EnableMultiNetwork: true}, + nil, + ), + ginkgo.Entry( + "does not manage localnet topologies on non-IC deployments without subnets", + &ovncnitypes.NetConf{NetConf: types.NetConf{Name: "blue"}, Topology: ovntypes.LocalnetTopology}, + config.OVNKubernetesFeatureConfig{EnableMultiNetwork: true}, + nad.ErrNetworkControllerTopologyNotManaged, + ), + ginkgo.Entry( + "does not manage localnet topologies on non-IC deployments with subnets", + &ovncnitypes.NetConf{ + NetConf: types.NetConf{Name: "blue"}, + Topology: ovntypes.LocalnetTopology, + Subnets: subnets, + }, + config.OVNKubernetesFeatureConfig{EnableMultiNetwork: true}, + nad.ErrNetworkControllerTopologyNotManaged, + ), + ) + }) + ginkgo.It("Cleanup", func() { app.Action = func(ctx *cli.Context) error { nodes := []v1.Node{ @@ -317,6 +402,8 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { sncm.ovnClient, sncm.watchFactory, sncm.recorder, + sncm.nadController, + nil, ) err = oc.init() gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -354,10 +441,12 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { ginkgo.Context("persistent IP allocations", func() { const ( claimName = "claim1" + claimName2 = "claim2" namespace = "ns" networkName = "blue" subnetCIDR = "192.168.200.0/24" subnetIP = "192.168.200.2/24" + subnetIP2 = "192.168.200.3/24" ) var netInfo util.NetInfo @@ -382,6 +471,7 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { KubeClient: fake.NewSimpleClientset(), IPAMClaimsClient: fakeipamclaimclient.NewSimpleClientset( ipamClaimWithIPAddr(claimName, 
namespace, networkName, subnetIP), + ipamClaimWithIPAddr(claimName2, namespace, networkName, subnetIP2), ), NetworkAttchDefClient: fakenadclient.NewSimpleClientset(), } @@ -407,14 +497,21 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { sncm.ovnClient, sncm.watchFactory, sncm.recorder, + sncm.nadController, + nil, ) gomega.Expect(nc.init()).To(gomega.Succeed()) gomega.Expect(nc.Start(ctx.Context)).To(gomega.Succeed()) ips, err := util.ParseIPNets([]string{subnetIP}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(nc.subnetAllocator.AllocateIPPerSubnet(netInfo.GetNetworkName(), ips)).To( + gomega.Equal(ip.ErrAllocated)) - gomega.Expect(nc.subnetAllocator.AllocateIPs(netInfo.GetNetworkName(), ips)).To(gomega.Equal(ip.ErrAllocated)) + ips2, err := util.ParseIPNets([]string{subnetIP2}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(nc.subnetAllocator.AllocateIPPerSubnet(netInfo.GetNetworkName(), ips2)).To( + gomega.Equal(ip.ErrAllocated)) return nil } @@ -455,6 +552,8 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { sncm.ovnClient, sncm.watchFactory, sncm.recorder, + sncm.nadController, + nil, ) gomega.Expect(nc.init()).To(gomega.Succeed()) gomega.Expect(nc.Start(ctx.Context)).To(gomega.Succeed()) @@ -462,7 +561,7 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { ips, err := util.ParseIPNets([]string{subnetIP}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(nc.subnetAllocator.AllocateIPs(netInfo.GetNetworkName(), ips)).To(gomega.Succeed()) + gomega.Expect(nc.subnetAllocator.AllocateIPPerSubnet(netInfo.GetNetworkName(), ips)).To(gomega.Succeed()) return nil } @@ -504,6 +603,8 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { sncm.ovnClient, sncm.watchFactory, sncm.recorder, + sncm.nadController, + nil, ) gomega.Expect(nc.init()).To(gomega.Succeed()) gomega.Expect(nc.Start(ctx.Context)).To(gomega.Succeed()) @@ -573,6 +674,8 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { sncm.ovnClient, sncm.watchFactory, sncm.recorder, + sncm.nadController, + nil, ) gomega.Expect(nc.init()).To(gomega.Succeed()) gomega.Expect(nc.Start(ctx.Context)).To(gomega.Succeed()) @@ -645,6 +748,8 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { sncm.ovnClient, sncm.watchFactory, sncm.recorder, + sncm.nadController, + nil, ) gomega.Expect(nc.init()).To(gomega.Succeed()) gomega.Expect(nc.Start(ctx.Context)).To(gomega.Succeed()) @@ -674,24 +779,6 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { netInfo util.NetInfo ) - ginkgo.BeforeEach(func() { - var err error - netInfo, err = util.NewNetInfo( - &ovncnitypes.NetConf{ - NetConf: types.NetConf{Name: "blue"}, - Role: ovntypes.NetworkRolePrimary, - Subnets: subnets, - Topology: ovntypes.Layer2Topology, - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - fakeClient = &util.OVNClusterManagerClientset{ - KubeClient: fake.NewSimpleClientset(&v1.NodeList{Items: nodes()}), - IPAMClaimsClient: fakeipamclaimclient.NewSimpleClientset(), - NetworkAttchDefClient: fakenadclient.NewSimpleClientset(), - } - }) - ginkgo.It("Automatically reserves IPs for the GW (.1) and mgmt port (.2)", func() { app.Action = func(ctx *cli.Context) error { gomega.Expect( @@ -701,6 +788,20 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { )).To(gomega.Succeed()) var err error + netInfo, err = util.NewNetInfo( + &ovncnitypes.NetConf{ + NetConf: types.NetConf{Name: "blue"}, + Role: ovntypes.NetworkRolePrimary, + Subnets: 
subnets, + Topology: ovntypes.Layer2Topology, + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + fakeClient = &util.OVNClusterManagerClientset{ + KubeClient: fake.NewSimpleClientset(&v1.NodeList{Items: nodes()}), + IPAMClaimsClient: fakeipamclaimclient.NewSimpleClientset(), + NetworkAttchDefClient: fakenadclient.NewSimpleClientset(), + } f, err = factory.NewClusterManagerWatchFactory(fakeClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(f.Start()).NotTo(gomega.HaveOccurred()) @@ -715,6 +816,8 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { sncm.ovnClient, sncm.watchFactory, sncm.recorder, + sncm.nadController, + nil, ) gomega.Expect(nc.init()).To(gomega.Succeed()) gomega.Expect(nc.Start(ctx.Context)).To(gomega.Succeed()) @@ -740,7 +843,12 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { return nil } - gomega.Expect(app.Run([]string{app.Name})).To(gomega.Succeed()) + gomega.Expect(app.Run([]string{ + app.Name, + // define the cluster as dualstack so the user defined primary network matches the ip family + "--cluster-subnets=10.128.0.0/14,fd00:10:244::/48", + "--k8s-service-cidrs=172.16.1.0/24,fd02::/112", + })).To(gomega.Succeed()) }) }) diff --git a/go-controller/pkg/clustermanager/status_manager/status_manager.go b/go-controller/pkg/clustermanager/status_manager/status_manager.go index be944502cc..60ef9d0e0a 100644 --- a/go-controller/pkg/clustermanager/status_manager/status_manager.go +++ b/go-controller/pkg/clustermanager/status_manager/status_manager.go @@ -62,7 +62,7 @@ func newStatusManager[T any](name string, informer cache.SharedIndexInformer, controllerConfig := &controller.ControllerConfig[T]{ Informer: informer, Lister: lister, - RateLimiter: workqueue.NewItemFastSlowRateLimiter(time.Second, 5*time.Second, 5), + RateLimiter: workqueue.NewTypedItemFastSlowRateLimiter[string](time.Second, 5*time.Second, 5), ObjNeedsUpdate: m.needsUpdate, Reconcile: m.updateStatus, Threadiness: 1, diff --git a/go-controller/pkg/clustermanager/status_manager/status_manager_suite_test.go b/go-controller/pkg/clustermanager/status_manager/status_manager_suite_test.go index 65b35710ac..79efa28f60 100644 --- a/go-controller/pkg/clustermanager/status_manager/status_manager_suite_test.go +++ b/go-controller/pkg/clustermanager/status_manager/status_manager_suite_test.go @@ -3,7 +3,7 @@ package status_manager import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/clustermanager/status_manager/status_manager_test.go b/go-controller/pkg/clustermanager/status_manager/status_manager_test.go index 240003af6b..f36953fb79 100644 --- a/go-controller/pkg/clustermanager/status_manager/status_manager_test.go +++ b/go-controller/pkg/clustermanager/status_manager/status_manager_test.go @@ -6,7 +6,7 @@ import ( "strings" "sync/atomic" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" . 
"github.com/onsi/gomega" clienttesting "k8s.io/client-go/testing" diff --git a/go-controller/pkg/clustermanager/status_manager/zone_tracker/zone_tracker.go b/go-controller/pkg/clustermanager/status_manager/zone_tracker/zone_tracker.go index fa669e27a9..4c84f32ee3 100644 --- a/go-controller/pkg/clustermanager/status_manager/zone_tracker/zone_tracker.go +++ b/go-controller/pkg/clustermanager/status_manager/zone_tracker/zone_tracker.go @@ -68,7 +68,7 @@ func NewZoneTracker(nodeInformer coreinformers.NodeInformer, onZonesUpdate func( } controllerConfig := &controller.ControllerConfig[corev1.Node]{ - RateLimiter: workqueue.NewItemFastSlowRateLimiter(time.Second, 5*time.Second, 5), + RateLimiter: workqueue.NewTypedItemFastSlowRateLimiter[string](time.Second, 5*time.Second, 5), Informer: nodeInformer.Informer(), Lister: nodeInformer.Lister().List, ObjNeedsUpdate: zt.needsUpdate, diff --git a/go-controller/pkg/clustermanager/status_manager/zone_tracker/zone_tracker_suite_test.go b/go-controller/pkg/clustermanager/status_manager/zone_tracker/zone_tracker_suite_test.go index 4b35f0c89d..8309a47a0b 100644 --- a/go-controller/pkg/clustermanager/status_manager/zone_tracker/zone_tracker_suite_test.go +++ b/go-controller/pkg/clustermanager/status_manager/zone_tracker/zone_tracker_suite_test.go @@ -3,7 +3,7 @@ package zone_tracker import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/clustermanager/status_manager/zone_tracker/zone_tracker_test.go b/go-controller/pkg/clustermanager/status_manager/zone_tracker/zone_tracker_test.go index f82b0ed46c..330bd9cb9b 100644 --- a/go-controller/pkg/clustermanager/status_manager/zone_tracker/zone_tracker_test.go +++ b/go-controller/pkg/clustermanager/status_manager/zone_tracker/zone_tracker_test.go @@ -3,7 +3,7 @@ package zone_tracker import ( "context" "fmt" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "sync/atomic" "time" diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/controller.go b/go-controller/pkg/clustermanager/userdefinednetwork/controller.go index 7b073d2097..8aa06120a5 100644 --- a/go-controller/pkg/clustermanager/userdefinednetwork/controller.go +++ b/go-controller/pkg/clustermanager/userdefinednetwork/controller.go @@ -2,20 +2,25 @@ package userdefinednetwork import ( "context" - "encoding/json" "errors" "fmt" - "reflect" "slices" + "strings" + "sync" "time" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + metaapplyv1 "k8s.io/client-go/applyconfigurations/meta/v1" corev1informer "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/tools/reference" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" netv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" @@ -26,34 +31,55 @@ import ( userdefinednetworkv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" udnapplyconfkv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1" userdefinednetworkclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned" + userdefinednetworkscheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/scheme" userdefinednetworkinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/userdefinednetwork/v1" userdefinednetworklister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1" - nadnotifier "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/clustermanager/userdefinednetwork/notifier" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/clustermanager/userdefinednetwork/notifier" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/clustermanager/userdefinednetwork/template" - cnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/controller" - ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) -type RenderNetAttachDefManifest func(*userdefinednetworkv1.UserDefinedNetwork) (*netv1.NetworkAttachmentDefinition, error) +type RenderNetAttachDefManifest func(obj client.Object, targetNamespace string) (*netv1.NetworkAttachmentDefinition, error) -type Controller struct { - Controller controller.Controller - - udnClient userdefinednetworkclientset.Interface - udnLister userdefinednetworklister.UserDefinedNetworkLister +type networkInUseError struct { + err error +} - nadNotifier *nadnotifier.NetAttachDefNotifier - nadClient netv1clientset.Interface - nadLister netv1lister.NetworkAttachmentDefinitionLister +func (n *networkInUseError) Error() string { + return n.err.Error() +} +type Controller struct { + // cudnController manage ClusterUserDefinedNetwork CRs. + cudnController controller.Controller + // udnController manage UserDefinedNetwork CRs. 
+ udnController controller.Controller + // nadNotifier notifies subscribing controllers about NetworkAttachmentDefinition events. + nadNotifier *notifier.NetAttachDefNotifier + // namespaceNotifier notifies subscribing controllers about Namespace events. + namespaceNotifier *notifier.NamespaceNotifier + // namespaceTracker tracks the namespaces affected by each CUDN CR, enabling detection of stale NADs. + // Keys are CR names; values are sets of affected namespace names. + namespaceTracker map[string]sets.Set[string] + namespaceTrackerLock sync.RWMutex + // renderNadFn renders a NAD manifest from a given object, enabling replacement in tests. renderNadFn RenderNetAttachDefManifest - - podInformer corev1informer.PodInformer + // createNetworkLock should be held when a NAD is created to avoid having two components + // trying to create an object with the same name. + createNetworkLock sync.Mutex + + udnClient userdefinednetworkclientset.Interface + udnLister userdefinednetworklister.UserDefinedNetworkLister + cudnLister userdefinednetworklister.ClusterUserDefinedNetworkLister + nadClient netv1clientset.Interface + nadLister netv1lister.NetworkAttachmentDefinitionLister + podInformer corev1informer.PodInformer + namespaceInformer corev1informer.NamespaceInformer networkInUseRequeueInterval time.Duration + eventRecorder record.EventRecorder } const defaultNetworkInUseCheckInterval = 1 * time.Minute @@ -63,61 +89,271 @@ func New( nadInfomer netv1infomer.NetworkAttachmentDefinitionInformer, udnClient userdefinednetworkclientset.Interface, udnInformer userdefinednetworkinformer.UserDefinedNetworkInformer, + cudnInformer userdefinednetworkinformer.ClusterUserDefinedNetworkInformer, renderNadFn RenderNetAttachDefManifest, podInformer corev1informer.PodInformer, + namespaceInformer corev1informer.NamespaceInformer, + eventRecorder record.EventRecorder, ) *Controller { udnLister := udnInformer.Lister() + cudnLister := cudnInformer.Lister() c := &Controller{ nadClient: nadClient, nadLister: nadInfomer.Lister(), udnClient: udnClient, udnLister: udnLister, + cudnLister: cudnLister, renderNadFn: renderNadFn, podInformer: podInformer, + namespaceInformer: namespaceInformer, networkInUseRequeueInterval: defaultNetworkInUseCheckInterval, + namespaceTracker: map[string]sets.Set[string]{}, + eventRecorder: eventRecorder, } - cfg := &controller.ControllerConfig[userdefinednetworkv1.UserDefinedNetwork]{ - RateLimiter: workqueue.DefaultControllerRateLimiter(), - Reconcile: c.reconcile, + udnCfg := &controller.ControllerConfig[userdefinednetworkv1.UserDefinedNetwork]{ + RateLimiter: workqueue.DefaultTypedControllerRateLimiter[string](), + Reconcile: c.reconcileUDN, ObjNeedsUpdate: c.udnNeedUpdate, Threadiness: 1, Informer: udnInformer.Informer(), Lister: udnLister.List, } - c.Controller = controller.NewController[userdefinednetworkv1.UserDefinedNetwork]("user-defined-network-controller", cfg) + c.udnController = controller.NewController[userdefinednetworkv1.UserDefinedNetwork]("user-defined-network-controller", udnCfg) + + cudnCfg := &controller.ControllerConfig[userdefinednetworkv1.ClusterUserDefinedNetwork]{ + RateLimiter: workqueue.DefaultTypedControllerRateLimiter[string](), + Reconcile: c.reconcileCUDN, + ObjNeedsUpdate: c.cudnNeedUpdate, + Threadiness: 1, + Informer: cudnInformer.Informer(), + Lister: cudnLister.List, + } + c.cudnController = controller.NewController[userdefinednetworkv1.ClusterUserDefinedNetwork]("cluster-user-defined-network-controller", cudnCfg) - c.nadNotifier = nadnotifier.NewNetAttachDefNotifier(nadInfomer, 
c) + c.nadNotifier = notifier.NewNetAttachDefNotifier(nadInfomer, c) + c.namespaceNotifier = notifier.NewNamespaceNotifier(namespaceInformer, c) return c } -func (c *Controller) ReconcileNetAttachDef(key string) { - // enqueue network-attachment-definitions requests in the controller workqueue - c.Controller.Reconcile(key) +func (c *Controller) Run() error { + klog.Infof("Starting user-defined network controllers") + if err := controller.StartWithInitialSync( + c.initializeNamespaceTracker, + c.cudnController, + c.udnController, + c.nadNotifier.Controller, + c.namespaceNotifier.Controller, + ); err != nil { + return fmt.Errorf("unable to start user-defined network controller: %v", err) + } + + return nil } -func (c *Controller) Run() error { - klog.Infof("Starting UserDefinedNetworkManager Controllers") - if err := controller.Start(c.nadNotifier.Controller, c.Controller); err != nil { - return fmt.Errorf("unable to start UserDefinedNetworkManager controller: %v", err) +// initializeNamespaceTracker populates the namespace tracker with the namespaces of NADs owned by the controller. +func (c *Controller) initializeNamespaceTracker() error { + cudns, err := c.cudnLister.List(labels.Everything()) + if err != nil { + return err + } + if len(cudns) == 0 { + return nil + } + + nads, err := c.nadLister.List(labels.Everything()) + if err != nil { + return err + } + if len(nads) == 0 { + return nil + } + indexedNADs := map[string]netv1.NetworkAttachmentDefinition{} + for _, nad := range nads { + if nad != nil { + indexedNADs[nad.Namespace+"/"+nad.Name] = *nad.DeepCopy() + } + } + + for _, cudn := range cudns { + c.namespaceTracker[cudn.Name] = sets.New[string]() + + for nadKey, nad := range indexedNADs { + if !metav1.IsControlledBy(&nad, cudn) { + continue + } + c.namespaceTracker[cudn.Name].Insert(nad.Namespace) + + // Mutating a map while iterating over it is usually avoided; here + // the processed entry is deleted deliberately because it should + // never be matched again, i.e.: each NAD should be recorded in the + // namespaceTracker exactly once. + delete(indexedNADs, nadKey) + } } return nil } func (c *Controller) Shutdown() { - controller.Stop(c.nadNotifier.Controller, c.Controller) + controller.Stop( + c.cudnController, + c.udnController, + c.nadNotifier.Controller, + c.namespaceNotifier.Controller, + ) +} + 
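For orientation, initializeNamespaceTracker above is an owner-index warm-up: list the CRs and the NADs once from the informer caches, then bucket each NAD's namespace under its controlling CR before the workqueues start. A condensed, self-contained sketch of that pattern (buildOwnerIndex is a hypothetical helper, not code from this patch):

```go
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
)

// buildOwnerIndex buckets each owned object's namespace under the name of its
// controlling owner, mirroring how the namespace tracker is warmed up from
// the informer caches before any reconcile runs.
func buildOwnerIndex(owners []metav1.Object, owned []metav1.Object) map[string]sets.Set[string] {
	index := map[string]sets.Set[string]{}
	for _, owner := range owners {
		index[owner.GetName()] = sets.New[string]()
		for _, obj := range owned {
			// IsControlledBy matches the controller ownerReference by UID,
			// the same check the patch applies to NADs.
			if metav1.IsControlledBy(obj, owner) {
				index[owner.GetName()].Insert(obj.GetNamespace())
			}
		}
	}
	return index
}
```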
+// ReconcileNetAttachDef enqueues a reconcile request for the NAD's owning CR following NAD events. +func (c *Controller) ReconcileNetAttachDef(key string) error { + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return fmt.Errorf("failed to split meta namespace key %q: %v", key, err) + } + nad, err := c.nadLister.NetworkAttachmentDefinitions(namespace).Get(name) + if err != nil { + if kerrors.IsNotFound(err) { + return nil + } + return fmt.Errorf("failed to get NetworkAttachmentDefinition %q from cache: %v", key, err) + } + ownerRef := metav1.GetControllerOf(nad) + if ownerRef == nil { + return nil + } + + switch ownerRef.Kind { + case "ClusterUserDefinedNetwork": + owner, err := c.cudnLister.Get(ownerRef.Name) + if err != nil { + return fmt.Errorf("failed to get ClusterUserDefinedNetwork %q from cache: %v", ownerRef.Name, err) + } + ownerKey, err := cache.MetaNamespaceKeyFunc(owner) + if err != nil { + return fmt.Errorf("failed to generate meta namespace key for CUDN: %v", err) + } + c.cudnController.Reconcile(ownerKey) + case "UserDefinedNetwork": + owner, err := c.udnLister.UserDefinedNetworks(nad.Namespace).Get(ownerRef.Name) + if err != nil { + return fmt.Errorf("failed to get UserDefinedNetwork %q from cache: %v", ownerRef.Name, err) + } + ownerKey, err := cache.MetaNamespaceKeyFunc(owner) + if err != nil { + return fmt.Errorf("failed to generate meta namespace key for UDN: %v", err) + } + c.udnController.Reconcile(ownerKey) + default: + return nil + } + return nil +} + +// ReconcileNamespace enqueues relevant Cluster UDN CR requests following namespace events. +func (c *Controller) ReconcileNamespace(key string) error { + c.namespaceTrackerLock.RLock() + defer c.namespaceTrackerLock.RUnlock() + + namespace, err := c.namespaceInformer.Lister().Get(key) + if err != nil { + return fmt.Errorf("failed to get namespace %q from cache: %w", key, err) + } + namespaceLabels := labels.Set(namespace.Labels) + + for cudnName, affectedNamespaces := range c.namespaceTracker { + affectedNamespace := affectedNamespaces.Has(key) + + selectedNamespace := false + + if !affectedNamespace { + cudn, err := c.cudnLister.Get(cudnName) + if err != nil { + return fmt.Errorf("failed to get CUDN %q from cache: %w", cudnName, err) + } + cudnSelector, err := metav1.LabelSelectorAsSelector(&cudn.Spec.NamespaceSelector) + if err != nil { + return fmt.Errorf("failed to convert CUDN namespace selector: %w", err) + } + selectedNamespace = cudnSelector.Matches(namespaceLabels) + } + + if affectedNamespace || selectedNamespace { + klog.Infof("Enqueue ClusterUDN %q following namespace %q event", cudnName, key) + c.cudnController.Reconcile(cudnName) + } + } + + return nil +} + 
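ReconcileNamespace above reduces to one primitive: convert each CR's metav1.LabelSelector into a runtime selector and match it against the namespace's labels. A minimal sketch of that step (the selector and label values here are made up for illustration):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Hypothetical selector, as a CUDN's spec.namespaceSelector might carry.
	sel := metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/metadata.name": "blue-ns"}}

	// Convert the API-level selector once, then match any namespace's labels.
	runtimeSel, err := metav1.LabelSelectorAsSelector(&sel)
	if err != nil {
		panic(err)
	}

	nsLabels := labels.Set{"kubernetes.io/metadata.name": "blue-ns"}
	fmt.Println(runtimeSel.Matches(nsLabels)) // true -> enqueue the owning CUDN
}
```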
+// UpdateSubsystemCondition may be used by other controllers handling UDN/NAD/network setup to report conditions that +// may affect UDN functionality. +// FieldManager should be unique for every subsystem. +// If the given network is not managed by a UDN, no condition will be reported and no error will be returned. +// Events may be used to report additional information about the condition to avoid overloading the condition message. +// When the condition should not change but new events should be reported, pass condition = nil. +func (c *Controller) UpdateSubsystemCondition( + networkName string, + fieldManager string, + condition *metav1.Condition, + events ...*util.EventDetails, +) error { + // try to find udn using network name + udnNamespace, udnName := template.ParseNetworkName(networkName) + if udnName == "" { + return nil + } + udn, err := c.udnLister.UserDefinedNetworks(udnNamespace).Get(udnName) + if err != nil { + return nil + } + + udnRef, err := reference.GetReference(userdefinednetworkscheme.Scheme, udn) + if err != nil { + return fmt.Errorf("failed to get object reference for UserDefinedNetwork %s/%s: %w", udnNamespace, udnName, err) + } + for _, event := range events { + c.eventRecorder.Event(udnRef, event.EventType, event.Reason, event.Note) + } + + if condition == nil { + return nil + } + + applyCondition := &metaapplyv1.ConditionApplyConfiguration{ + Type: &condition.Type, + Status: &condition.Status, + LastTransitionTime: &condition.LastTransitionTime, + Reason: &condition.Reason, + Message: &condition.Message, + } + + udnStatus := udnapplyconfkv1.UserDefinedNetworkStatus().WithConditions(applyCondition) + + applyUDN := udnapplyconfkv1.UserDefinedNetwork(udnName, udnNamespace).WithStatus(udnStatus) + opts := metav1.ApplyOptions{ + FieldManager: fieldManager, + Force: true, + } + _, err = c.udnClient.K8sV1().UserDefinedNetworks(udnNamespace).ApplyStatus(context.Background(), applyUDN, opts) + if err != nil { + if kerrors.IsNotFound(err) { + return nil + } + return fmt.Errorf("failed to update UserDefinedNetwork %s/%s status: %w", udnNamespace, udnName, err) + } + return nil } func (c *Controller) udnNeedUpdate(_, _ *userdefinednetworkv1.UserDefinedNetwork) bool { return true } -// reconcile get the user-defined-network CRD instance key and reconcile it according to spec. -// It creates network-attachment-definition according to spec at the namespace the UDN object resides. -// The NAD object are created with the same key as the request NAD, having both kinds have the same key enable +// reconcileUDN gets a UserDefinedNetwork CR key and reconciles it according to its spec. +// It creates a NAD according to the spec in the namespace where the CR resides. +// The NAD objects are created with the same key as the request CR; having both kinds share the same key enables // the controller to act on NAD changes as well and reconciles NAD objects (e.g: in case NAD is deleted it will be re-created). 
-func (c *Controller) reconcile(key string) error { +func (c *Controller) reconcileUDN(key string) error { namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return err @@ -128,57 +364,30 @@ func (c *Controller) reconcile(key string) error { return fmt.Errorf("failed to get UserDefinedNetwork %q from cache: %v", key, err) } - nad, err := c.nadLister.NetworkAttachmentDefinitions(namespace).Get(name) - if err != nil && !kerrors.IsNotFound(err) { - return fmt.Errorf("failed to get NetworkAttachmentDefinition %q from cache: %v", key, err) - } - udnCopy := udn.DeepCopy() - nadCopy := nad.DeepCopy() - nadCopy, syncErr := c.syncUserDefinedNetwork(udnCopy, nadCopy) + nadCopy, syncErr := c.syncUserDefinedNetwork(udnCopy) updateStatusErr := c.updateUserDefinedNetworkStatus(udnCopy, nadCopy, syncErr) var networkInUse *networkInUseError if errors.As(syncErr, &networkInUse) { - c.Controller.ReconcileAfter(key, c.networkInUseRequeueInterval) + c.udnController.ReconcileAfter(key, c.networkInUseRequeueInterval) return updateStatusErr } return errors.Join(syncErr, updateStatusErr) } -type networkInUseError struct { - err error -} - -func (n *networkInUseError) Error() string { - return n.err.Error() -} - -func (c *Controller) syncUserDefinedNetwork(udn *userdefinednetworkv1.UserDefinedNetwork, nad *netv1.NetworkAttachmentDefinition) (*netv1.NetworkAttachmentDefinition, error) { +func (c *Controller) syncUserDefinedNetwork(udn *userdefinednetworkv1.UserDefinedNetwork) (*netv1.NetworkAttachmentDefinition, error) { if udn == nil { return nil, nil } if !udn.DeletionTimestamp.IsZero() { // udn is being deleted if controllerutil.ContainsFinalizer(udn, template.FinalizerUserDefinedNetwork) { - - if nad != nil && - metav1.IsControlledBy(nad, udn) && - controllerutil.ContainsFinalizer(nad, template.FinalizerUserDefinedNetwork) { - - if err := c.verifyNetAttachDefNotInUse(nad); err != nil { - return nil, fmt.Errorf("failed to verify NAD not in use [%s/%s]: %w", nad.Namespace, nad.Name, &networkInUseError{err: err}) - } - - controllerutil.RemoveFinalizer(nad, template.FinalizerUserDefinedNetwork) - nad, err := c.nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(nad.Namespace).Update(context.Background(), nad, metav1.UpdateOptions{}) - if err != nil { - return nil, err - } - klog.Infof("Finalizer removed from NetworkAttachmetDefinition [%s/%s]", nad.Namespace, nad.Name) + if err := c.deleteNAD(udn, udn.Namespace); err != nil { + return nil, fmt.Errorf("failed to delete NetworkAttachmentDefinition [%s/%s]: %w", udn.Namespace, udn.Name, err) } controllerutil.RemoveFinalizer(udn, template.FinalizerUserDefinedNetwork) @@ -189,7 +398,7 @@ func (c *Controller) syncUserDefinedNetwork(udn *userdefinednetworkv1.UserDefine klog.Infof("Finalizer removed from UserDefinedNetworks [%s/%s]", udn.Namespace, udn.Name) } - return nad, nil + return nil, nil } if finalizerAdded := controllerutil.AddFinalizer(udn, template.FinalizerUserDefinedNetwork); finalizerAdded { @@ -200,96 +409,7 @@ func (c *Controller) syncUserDefinedNetwork(udn *userdefinednetworkv1.UserDefine klog.Infof("Added Finalizer to UserDefinedNetwork [%s/%s]", udn.Namespace, udn.Name) } - desiredNAD, err := c.renderNadFn(udn) - if err != nil { - return nil, fmt.Errorf("failed to generate NetworkAttachmentDefinition: %w", err) - } - if nad == nil { - // creating NAD in case no primary network exist should be atomic and synchronized with - // any other thread that create NADs. 
- // Since the UserDefinedNetwork controller use single thread (threadiness=1), - // and being the only controller that create NADs, this conditions is fulfilled. - if primaryNetwork(udn.Spec) { - actualNads, lerr := c.nadLister.NetworkAttachmentDefinitions(udn.Namespace).List(labels.Everything()) - if lerr != nil { - return nil, fmt.Errorf("failed to list NetworkAttachmetDefinition: %w", lerr) - } - if err := validatePrimaryNetworkNADNotExist(actualNads); err != nil { - return nil, err - } - } - - nad, err = c.nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Create(context.Background(), desiredNAD, metav1.CreateOptions{}) - if err != nil { - return nil, fmt.Errorf("failed to create NetworkAttachmentDefinition: %w", err) - } - klog.Infof("Created NetworkAttachmentDefinition [%s/%s]", nad.Namespace, nad.Name) - return nad, nil - } - - if !metav1.IsControlledBy(nad, udn) { - return nil, fmt.Errorf("foreign NetworkAttachmentDefinition with the desired name already exist [%s/%s]", nad.Namespace, nad.Name) - } - - if !reflect.DeepEqual(nad.Spec, desiredNAD.Spec) { - nad.Spec.Config = desiredNAD.Spec.Config - nad, err = c.nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(nad.Namespace).Update(context.Background(), nad, metav1.UpdateOptions{}) - if err != nil { - return nil, fmt.Errorf("failed to update NetworkAttachmentDefinition: %w", err) - } - klog.Infof("Updated NetworkAttachmentDefinition [%s/%s]", nad.Namespace, nad.Name) - } - - return nad, nil -} - -func primaryNetwork(spec userdefinednetworkv1.UserDefinedNetworkSpec) bool { - var role userdefinednetworkv1.NetworkRole - switch spec.Topology { - case userdefinednetworkv1.NetworkTopologyLayer3: - role = spec.Layer3.Role - case userdefinednetworkv1.NetworkTopologyLayer2: - role = spec.Layer2.Role - } - - return role == userdefinednetworkv1.NetworkRolePrimary -} - -func (c *Controller) verifyNetAttachDefNotInUse(nad *netv1.NetworkAttachmentDefinition) error { - pods, err := c.podInformer.Lister().Pods(nad.Namespace).List(labels.Everything()) - if err != nil { - return fmt.Errorf("failed to list pods at target namesapce %q: %w", nad.Namespace, err) - } - - nadName := util.GetNADName(nad.Namespace, nad.Name) - var connectedPods []string - for _, pod := range pods { - podNetworks, err := util.UnmarshalPodAnnotationAllNetworks(pod.Annotations) - if err != nil && !util.IsAnnotationNotSetError(err) { - return fmt.Errorf("failed to unmarshal pod annotation [%s/%s]: %w", pod.Namespace, pod.Name, err) - } - if _, ok := podNetworks[nadName]; ok { - connectedPods = append(connectedPods, pod.Namespace+"/"+pod.Name) - } - } - if len(connectedPods) > 0 { - return fmt.Errorf("network in use by the following pods: %v", connectedPods) - } - return nil -} - -func validatePrimaryNetworkNADNotExist(nads []*netv1.NetworkAttachmentDefinition) error { - for _, nad := range nads { - var netConf *cnitypes.NetConf - if err := json.Unmarshal([]byte(nad.Spec.Config), &netConf); err != nil { - return fmt.Errorf("failed to validate no primary network exist: unmarshal failed [%s/%s]: %w", - nad.Namespace, nad.Name, err) - } - if netConf.Type == template.OvnK8sCNIOverlay && netConf.Role == ovntypes.NetworkRolePrimary { - return fmt.Errorf("primary network already exist in namespace %q: %q", nad.Namespace, nad.Name) - } - } - return nil + return c.updateNAD(udn, udn.Namespace) } func (c *Controller) updateUserDefinedNetworkStatus(udn *userdefinednetworkv1.UserDefinedNetwork, nad *netv1.NetworkAttachmentDefinition, syncError error) error { @@ 
-303,9 +423,19 @@ func (c *Controller) updateUserDefinedNetworkStatus(udn *userdefinednetworkv1.Us if updated { var err error + conditionsApply := make([]*metaapplyv1.ConditionApplyConfiguration, len(conditions)) + for i := range conditions { + conditionsApply[i] = &metaapplyv1.ConditionApplyConfiguration{ + Type: &conditions[i].Type, + Status: &conditions[i].Status, + LastTransitionTime: &conditions[i].LastTransitionTime, + Reason: &conditions[i].Reason, + Message: &conditions[i].Message, + } + } udnApplyConf := udnapplyconfkv1.UserDefinedNetwork(udn.Name, udn.Namespace). WithStatus(udnapplyconfkv1.UserDefinedNetworkStatus(). - WithConditions(conditions...)) + WithConditions(conditionsApply...)) opts := metav1.ApplyOptions{FieldManager: "user-defined-network-controller"} udn, err = c.udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).ApplyStatus(context.Background(), udnApplyConf, opts) if err != nil { @@ -358,3 +488,220 @@ } return conditions, false } + +func (c *Controller) cudnNeedUpdate(_ *userdefinednetworkv1.ClusterUserDefinedNetwork, _ *userdefinednetworkv1.ClusterUserDefinedNetwork) bool { + return true +} + +// reconcileCUDN gets a ClusterUserDefinedNetwork CR key and reconciles it according to its spec. +// It creates NADs according to the spec in the selected namespaces. +// The NAD objects are created with the same key as the request CR; having both kinds share the same key enables +// the controller to act on NAD changes as well and reconcile NAD objects (e.g: in case NAD is deleted it will be re-created). +func (c *Controller) reconcileCUDN(key string) error { + _, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + + cudn, err := c.cudnLister.Get(name) + if err != nil && !kerrors.IsNotFound(err) { + return fmt.Errorf("failed to get ClusterUserDefinedNetwork %q from cache: %v", key, err) + } + + cudnCopy := cudn.DeepCopy() + + nads, syncErr := c.syncClusterUDN(cudnCopy) + + updateStatusErr := c.updateClusterUDNStatus(cudnCopy, nads, syncErr) + + var networkInUse *networkInUseError + if errors.As(syncErr, &networkInUse) { + c.cudnController.ReconcileAfter(key, c.networkInUseRequeueInterval) + return updateStatusErr + } + + return errors.Join(syncErr, updateStatusErr) +} + +func (c *Controller) syncClusterUDN(cudn *userdefinednetworkv1.ClusterUserDefinedNetwork) ([]netv1.NetworkAttachmentDefinition, error) { + c.namespaceTrackerLock.Lock() + defer c.namespaceTrackerLock.Unlock() + + if cudn == nil { + return nil, nil + } + + cudnName := cudn.Name + affectedNamespaces := c.namespaceTracker[cudnName] + + if !cudn.DeletionTimestamp.IsZero() { + if controllerutil.ContainsFinalizer(cudn, template.FinalizerUserDefinedNetwork) { + var errs []error + for nsToDelete := range affectedNamespaces { + if err := c.deleteNAD(cudn, nsToDelete); err != nil { + errs = append(errs, fmt.Errorf("failed to delete NetworkAttachmentDefinition [%s/%s]: %w", + nsToDelete, cudnName, err)) + } else { + c.namespaceTracker[cudnName].Delete(nsToDelete) + } + } + + if len(errs) > 0 { + return nil, errors.Join(errs...) 
+ } + + var err error + controllerutil.RemoveFinalizer(cudn, template.FinalizerUserDefinedNetwork) + cudn, err = c.udnClient.K8sV1().ClusterUserDefinedNetworks().Update(context.Background(), cudn, metav1.UpdateOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to remove finalizer from ClusterUserDefinedNetwork %q: %w", + cudnName, err) + } + klog.Infof("Finalizer removed from ClusterUserDefinedNetwork %q", cudn.Name) + delete(c.namespaceTracker, cudnName) + } + + return nil, nil + } + + if _, exist := c.namespaceTracker[cudnName]; !exist { + // start tracking CR + c.namespaceTracker[cudnName] = sets.Set[string]{} + } + + if finalizerAdded := controllerutil.AddFinalizer(cudn, template.FinalizerUserDefinedNetwork); finalizerAdded { + var err error + cudn, err = c.udnClient.K8sV1().ClusterUserDefinedNetworks().Update(context.Background(), cudn, metav1.UpdateOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to add finalizer to ClusterUserDefinedNetwork %q: %w", cudnName, err) + } + klog.Infof("Added Finalizer to ClusterUserDefinedNetwork %q", cudnName) + } + + selectedNamespaces, err := c.getSelectedNamespaces(cudn.Spec.NamespaceSelector) + if err != nil { + return nil, fmt.Errorf("failed to get selected namespaces: %w", err) + } + + var errs []error + for nsToDelete := range affectedNamespaces.Difference(selectedNamespaces) { + if err := c.deleteNAD(cudn, nsToDelete); err != nil { + errs = append(errs, fmt.Errorf("failed to delete NetworkAttachmentDefinition [%s/%s]: %w", + nsToDelete, cudnName, err)) + } else { + c.namespaceTracker[cudnName].Delete(nsToDelete) + } + } + + var nads []netv1.NetworkAttachmentDefinition + for nsToUpdate := range selectedNamespaces { + nad, err := c.updateNAD(cudn, nsToUpdate) + if err != nil { + errs = append(errs, err) + } else { + c.namespaceTracker[cudn.Name].Insert(nsToUpdate) + nads = append(nads, *nad) + } + } + + return nads, errors.Join(errs...) +} + 
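The syncClusterUDN flow above is the standard controller-runtime finalizer lifecycle: add the finalizer while the object is live; on deletion, release external resources (here, the per-namespace NADs) before dropping it. A compact sketch of that guard under simplified types (a ConfigMap stands in for the CR, and cleanup is a hypothetical callback):

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const finalizer = "example.io/cleanup" // hypothetical finalizer key

func reconcileWithFinalizer(ctx context.Context, c client.Client, obj *corev1.ConfigMap, cleanup func() error) error {
	if !obj.DeletionTimestamp.IsZero() {
		// Object is being deleted: run cleanup first, then drop the finalizer
		// so the API server can actually remove the object.
		if controllerutil.ContainsFinalizer(obj, finalizer) {
			if err := cleanup(); err != nil {
				return err // keep the finalizer; retry later
			}
			controllerutil.RemoveFinalizer(obj, finalizer)
			return c.Update(ctx, obj)
		}
		return nil
	}
	// Object is live: ensure the finalizer is present before doing work
	// that would need cleanup on deletion.
	if controllerutil.AddFinalizer(obj, finalizer) {
		return c.Update(ctx, obj)
	}
	return nil
}
```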
+// getSelectedNamespaces lists all namespaces matching the given selector and returns +// a set of their names. +func (c *Controller) getSelectedNamespaces(sel metav1.LabelSelector) (sets.Set[string], error) { + selectedNamespaces := sets.Set[string]{} + labelSelector, err := metav1.LabelSelectorAsSelector(&sel) + if err != nil { + return nil, fmt.Errorf("failed to create label-selector: %w", err) + } + selectedNamespacesList, err := c.namespaceInformer.Lister().List(labelSelector) + if err != nil { + return nil, fmt.Errorf("failed to list namespaces: %w", err) + } + for _, selectedNs := range selectedNamespacesList { + selectedNamespaces.Insert(selectedNs.Name) + } + return selectedNamespaces, nil +} + +func (c *Controller) updateClusterUDNStatus(cudn *userdefinednetworkv1.ClusterUserDefinedNetwork, nads []netv1.NetworkAttachmentDefinition, syncError error) error { + if cudn == nil { + return nil + } + + // sort NADs by namespace names to avoid redundant updates due to inconsistent ordering + slices.SortFunc(nads, func(a, b netv1.NetworkAttachmentDefinition) int { + return strings.Compare(a.Namespace, b.Namespace) + }) + + networkReadyCondition := newClusterNetworkReadyCondition(nads, syncError) + + conditions, updated := updateCondition(cudn.Status.Conditions, networkReadyCondition) + if !updated { + return nil + } + conditionsApply := make([]*metaapplyv1.ConditionApplyConfiguration, len(conditions)) + for i := range conditions { + conditionsApply[i] = &metaapplyv1.ConditionApplyConfiguration{ + Type: &conditions[i].Type, + Status: &conditions[i].Status, + LastTransitionTime: &conditions[i].LastTransitionTime, + Reason: &conditions[i].Reason, + Message: &conditions[i].Message, + } + } + var err error + applyConf := udnapplyconfkv1.ClusterUserDefinedNetwork(cudn.Name). + WithStatus(udnapplyconfkv1.ClusterUserDefinedNetworkStatus(). + WithConditions(conditionsApply...)) + opts := metav1.ApplyOptions{FieldManager: "user-defined-network-controller"} + cudnName := cudn.Name + cudn, err = c.udnClient.K8sV1().ClusterUserDefinedNetworks().ApplyStatus(context.Background(), applyConf, opts) + if err != nil { + if kerrors.IsNotFound(err) { + return nil + } + return fmt.Errorf("failed to update ClusterUserDefinedNetwork status %q: %w", cudnName, err) + } + klog.Infof("Updated status ClusterUserDefinedNetwork %q", cudn.Name) + + return nil +} + +func newClusterNetworkReadyCondition(nads []netv1.NetworkAttachmentDefinition, syncError error) *metav1.Condition { + var namespaces []string + for _, nad := range nads { + namespaces = append(namespaces, nad.Namespace) + } + affectedNamespaces := strings.Join(namespaces, ", ") + + now := metav1.Now() + condition := &metav1.Condition{ + Type: "NetworkReady", + Status: metav1.ConditionTrue, + Reason: "NetworkAttachmentDefinitionReady", + Message: fmt.Sprintf("NetworkAttachmentDefinition has been created in the following namespaces: [%s]", affectedNamespaces), + LastTransitionTime: now, + } + + var deletedNadKeys []string + for _, nad := range nads { + if nad.DeletionTimestamp != nil { + deletedNadKeys = append(deletedNadKeys, nad.Namespace+"/"+nad.Name) + } + } + if len(deletedNadKeys) > 0 { + condition.Status = metav1.ConditionFalse + condition.Reason = "NetworkAttachmentDefinitionDeleted" + condition.Message = fmt.Sprintf("NetworkAttachmentDefinitions are being deleted: %v", deletedNadKeys) + } + + if syncError != nil { + condition.Status = metav1.ConditionFalse + condition.Reason = "NetworkAttachmentDefinitionSyncError" + condition.Message = syncError.Error() + } + + return condition +} diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/controller_helper.go 
diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/controller_helper.go b/go-controller/pkg/clustermanager/userdefinednetwork/controller_helper.go new file mode 100644 index 0000000000..2fb784675b --- /dev/null +++ b/go-controller/pkg/clustermanager/userdefinednetwork/controller_helper.go @@ -0,0 +1,115 @@ +package userdefinednetwork + +import ( + "context" + "fmt" + "reflect" + + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + netv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/clustermanager/userdefinednetwork/template" + utiludn "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/udn" +) + +func (c *Controller) updateNAD(obj client.Object, namespace string) (*netv1.NetworkAttachmentDefinition, error) { + desiredNAD, err := c.renderNadFn(obj, namespace) + if err != nil { + return nil, fmt.Errorf("failed to generate NetworkAttachmentDefinition: %w", err) + } + + nad, err := c.nadLister.NetworkAttachmentDefinitions(namespace).Get(obj.GetName()) + if err != nil && !kerrors.IsNotFound(err) { + return nil, fmt.Errorf("failed to get NetworkAttachmentDefinition %s/%s from cache: %v", namespace, obj.GetName(), err) + } + nadCopy := nad.DeepCopy() + + if nadCopy == nil { + // Creating a NAD when no primary network exists must be atomic and synchronized with + // any other thread that creates NADs. + c.createNetworkLock.Lock() + defer c.createNetworkLock.Unlock() + + if utiludn.IsPrimaryNetwork(template.GetSpec(obj)) { + actualNads, err := c.nadLister.NetworkAttachmentDefinitions(namespace).List(labels.Everything()) + if err != nil { + return nil, fmt.Errorf("failed to list NetworkAttachmentDefinition: %w", err) + } + // This is a best-effort check that no primary NAD exists before creating one; + // nothing prevents a primary NAD from being created right after this check. + if err := PrimaryNetAttachDefNotExist(actualNads); err != nil { + return nil, err + } + } + + newNAD, err := c.nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(namespace).Create(context.Background(), desiredNAD, metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to create NetworkAttachmentDefinition: %w", err) + } + klog.Infof("Created NetworkAttachmentDefinition [%s/%s]", newNAD.Namespace, newNAD.Name) + + return newNAD, nil + } + + if !metav1.IsControlledBy(nadCopy, obj) { + return nil, fmt.Errorf("foreign NetworkAttachmentDefinition with the desired name already exist [%s/%s]", nadCopy.Namespace, nadCopy.Name) + } + + if reflect.DeepEqual(nadCopy.Spec.Config, desiredNAD.Spec.Config) { + return nadCopy, nil + } + + nadCopy.Spec.Config = desiredNAD.Spec.Config + updatedNAD, err := c.nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(nadCopy.Namespace).Update(context.Background(), nadCopy, metav1.UpdateOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to update NetworkAttachmentDefinition: %w", err) + } + klog.Infof("Updated NetworkAttachmentDefinition [%s/%s]", updatedNAD.Namespace, updatedNAD.Name) + + return updatedNAD, nil +}
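Reviewer note: the creation branch above holds `createNetworkLock` and then re-checks the informer cache, because the check is advisory rather than transactional. The helper below is an illustrative stand-in for `PrimaryNetAttachDefNotExist` (this PR's real helper lives elsewhere in the package); it assumes the NAD's `spec.config` is the OVN-Kubernetes JSON payload carrying a `role` field:

```go
package udnexample

import (
	"encoding/json"
	"fmt"

	netv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
)

// primaryNetAttachDefNotExist returns an error if any NAD in the list
// already declares the primary role, mirroring the error wording the
// tests in this PR expect.
func primaryNetAttachDefNotExist(nads []*netv1.NetworkAttachmentDefinition) error {
	for _, nad := range nads {
		var conf struct {
			Role string `json:"role"`
		}
		if err := json.Unmarshal([]byte(nad.Spec.Config), &conf); err != nil {
			return fmt.Errorf("unmarshal failed [%s/%s]: %w", nad.Namespace, nad.Name, err)
		}
		if conf.Role == "primary" {
			return fmt.Errorf("primary network already exist in namespace %q: %q", nad.Namespace, nad.Name)
		}
	}
	return nil
}
```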
+ +func (c *Controller) deleteNAD(obj client.Object, namespace string) error { + nad, err := c.nadLister.NetworkAttachmentDefinitions(namespace).Get(obj.GetName()) + if err != nil && !kerrors.IsNotFound(err) { + return fmt.Errorf("failed to get NetworkAttachmentDefinition %s/%s from cache: %v", namespace, obj.GetName(), err) + } + nadCopy := nad.DeepCopy() + + if nadCopy == nil || + !metav1.IsControlledBy(nadCopy, obj) || + !controllerutil.ContainsFinalizer(nadCopy, template.FinalizerUserDefinedNetwork) { + return nil + } + + pods, err := c.podInformer.Lister().Pods(nadCopy.Namespace).List(labels.Everything()) + if err != nil { + return fmt.Errorf("failed to list pods at target namespace %q: %w", nadCopy.Namespace, err) + } + // This is a best-effort check that no pod is using the subject NAD; + // nothing prevents a pod from being created right after this check. + if err := NetAttachDefNotInUse(nadCopy, pods); err != nil { + return &networkInUseError{err: err} + } + + controllerutil.RemoveFinalizer(nadCopy, template.FinalizerUserDefinedNetwork) + updatedNAD, err := c.nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(nadCopy.Namespace).Update(context.Background(), nadCopy, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to remove NetworkAttachmentDefinition finalizer: %w", err) + } + klog.Infof("Finalizer removed from NetworkAttachmentDefinition [%s/%s]", updatedNAD.Namespace, updatedNAD.Name) + + err = c.nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(updatedNAD.Namespace).Delete(context.Background(), updatedNAD.Name, metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + klog.Infof("Deleted NetworkAttachmentDefinition [%s/%s]", updatedNAD.Namespace, updatedNAD.Name) + + return nil +}
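Reviewer note: `deleteNAD` releases the object with the `controllerutil` finalizer helpers before issuing the delete. A compact sketch of that two-step release, with a hypothetical finalizer name; with recent controller-runtime the helpers only mutate the in-memory object and report whether anything changed, so the caller still has to push the change with an Update:

```go
package udnexample

import (
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// Hypothetical finalizer name, for illustration only.
const protectionFinalizer = "example.io/network-protection"

// releaseFinalizer drops our finalizer from the local copy and reports
// whether anything changed; only then is an Update (and the actual
// Delete) worth issuing.
func releaseFinalizer(obj client.Object) bool {
	if !controllerutil.ContainsFinalizer(obj, protectionFinalizer) {
		return false
	}
	return controllerutil.RemoveFinalizer(obj, protectionFinalizer)
}
```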
"github.com/onsi/gomega" ) diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/controller_test.go b/go-controller/pkg/clustermanager/userdefinednetwork/controller_test.go index c8ac81b1cd..3dd14f1902 100644 --- a/go-controller/pkg/clustermanager/userdefinednetwork/controller_test.go +++ b/go-controller/pkg/clustermanager/userdefinednetwork/controller_test.go @@ -4,10 +4,10 @@ import ( "context" "errors" "fmt" + "strings" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -15,22 +15,19 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - informerfactory "k8s.io/client-go/informers" - corev1informer "k8s.io/client-go/informers/core/v1" - "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/testing" "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" netv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + netv1clientset "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned" netv1fakeclientset "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/fake" - netv1informerfactory "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions" - netv1Informer "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1" udnv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" + udnclient "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned" udnfakeclient "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/fake" - udninformerfactory "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions" - udninformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/userdefinednetwork/v1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/clustermanager/userdefinednetwork/template" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -38,515 +35,801 @@ import ( var _ = Describe("User Defined Network Controller", func() { var ( - udnClient *udnfakeclient.Clientset - nadClient *netv1fakeclientset.Clientset - udnInformer udninformer.UserDefinedNetworkInformer - nadInformer netv1Informer.NetworkAttachmentDefinitionInformer - kubeClient *fake.Clientset - podInformer corev1informer.PodInformer + cs *util.OVNClusterManagerClientset + f *factory.WatchFactory ) BeforeEach(func() { - udnClient = udnfakeclient.NewSimpleClientset() - udnInformer = udninformerfactory.NewSharedInformerFactory(udnClient, 15).K8s().V1().UserDefinedNetworks() - nadClient = netv1fakeclientset.NewSimpleClientset() - nadInformer = netv1informerfactory.NewSharedInformerFactory(nadClient, 15).K8sCniCncfIo().V1().NetworkAttachmentDefinitions() - - kubeClient = fake.NewSimpleClientset() - sharedInformer := informerfactory.NewSharedInformerFactoryWithOptions(kubeClient, 15) - podInformer = sharedInformer.Core().V1().Pods() + // Restore global default values before each testcase + 
Expect(config.PrepareTestConfig()).To(Succeed()) + config.OVNKubernetesFeature.EnableMultiNetwork = true + config.OVNKubernetesFeature.EnableNetworkSegmentation = true }) - Context("controller", func() { - var f *factory.WatchFactory - - BeforeEach(func() { - // Restore global default values before each testcase - Expect(config.PrepareTestConfig()).To(Succeed()) - config.OVNKubernetesFeature.EnableMultiNetwork = true - config.OVNKubernetesFeature.EnableNetworkSegmentation = true - - fakeClient := &util.OVNClusterManagerClientset{ - KubeClient: kubeClient, - NetworkAttchDefClient: nadClient, - UserDefinedNetworkClient: udnClient, - } - var err error - f, err = factory.NewClusterManagerWatchFactory(fakeClient) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Start()).To(Succeed()) - }) - - AfterEach(func() { + AfterEach(func() { + if f != nil { f.Shutdown() - }) - - It("should create NAD successfully", func() { - udn := testUDN() - udn, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + } + }) - expectedNAD := testNAD() - c := New(nadClient, f.NADInformer(), udnClient, f.UserDefinedNetworkInformer(), renderNadStub(expectedNAD), f.PodCoreInformer()) - Expect(c.Run()).To(Succeed()) + newTestController := func(renderNADStub RenderNetAttachDefManifest, objects ...runtime.Object) *Controller { + cs = util.GetOVNClientset(objects...).GetClusterManagerClientset() + var err error + f, err = factory.NewClusterManagerWatchFactory(cs) + Expect(err).NotTo(HaveOccurred()) + Expect(f.Start()).To(Succeed()) - Eventually(func() []metav1.Condition { - udn, err = udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - return normalizeConditions(udn.Status.Conditions) - }).Should(Equal([]metav1.Condition{{ - Type: "NetworkReady", - Status: "True", - Reason: "NetworkAttachmentDefinitionReady", - Message: "NetworkAttachmentDefinition has been created", - }})) - - nad, err := nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + return New(cs.NetworkAttchDefClient, f.NADInformer(), + cs.UserDefinedNetworkClient, f.UserDefinedNetworkInformer(), f.ClusterUserDefinedNetworkInformer(), + renderNADStub, f.PodCoreInformer(), f.NamespaceInformer(), nil, + ) + } - Expect(nad).To(Equal(expectedNAD)) + Context("manager", func() { + var c *Controller + AfterEach(func() { + if c != nil { + c.Shutdown() + } }) - - It("should fail when NAD render fail", func() { - udn := testUDN() - udn, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - renderErr := errors.New("render NAD fails") - - c := New(nadClient, f.NADInformer(), udnClient, f.UserDefinedNetworkInformer(), failRenderNadStub(renderErr), f.PodCoreInformer()) - Expect(c.Run()).To(Succeed()) - - Eventually(func() []metav1.Condition { - udn, err = udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + Context("reconcile UDN CR", func() { + It("should create NAD successfully", func() { + udn := testUDN() + expectedNAD := testNAD() + c = newTestController(renderNadStub(expectedNAD), udn) + Expect(c.Run()).To(Succeed()) + + Eventually(func() []metav1.Condition { + udn, err := 
cs.UserDefinedNetworkClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return normalizeConditions(udn.Status.Conditions) + }).Should(Equal([]metav1.Condition{{ + Type: "NetworkReady", + Status: "True", + Reason: "NetworkAttachmentDefinitionReady", + Message: "NetworkAttachmentDefinition has been created", + }})) + + nad, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - return normalizeConditions(udn.Status.Conditions) - }).Should(Equal([]metav1.Condition{{ - Type: "NetworkReady", - Status: "False", - Reason: "SyncError", - Message: "failed to generate NetworkAttachmentDefinition: " + renderErr.Error(), - }})) - - _, err = nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) - Expect(kerrors.IsNotFound(err)).To(BeTrue()) - }) - It("should fail when NAD create fail", func() { - udn := testUDN() - udn, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - expectedError := errors.New("create NAD error") - nadClient.PrependReactor("create", "network-attachment-definitions", func(action testing.Action) (handled bool, ret runtime.Object, err error) { - return true, nil, expectedError + Expect(nad).To(Equal(expectedNAD)) }) - c := New(nadClient, f.NADInformer(), udnClient, f.UserDefinedNetworkInformer(), noopRenderNadStub(), f.PodCoreInformer()) - Expect(c.Run()).To(Succeed()) - - Eventually(func() []metav1.Condition { - udn, err = udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - return udn.Status.Conditions - }).ShouldNot(BeEmpty()) - - Expect(normalizeConditions(udn.Status.Conditions)).To(Equal([]metav1.Condition{{ - Type: "NetworkReady", - Status: "False", - Reason: "SyncError", - Message: "failed to create NetworkAttachmentDefinition: create NAD error", - }})) - - _, err = nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) - Expect(kerrors.IsNotFound(err)).To(BeTrue()) - }) - - It("should fail when foreign NAD exist", func() { - udn := testUDN() - udn, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - foreignNad := testNAD() - foreignNad.ObjectMeta.OwnerReferences = nil - foreignNad, err = nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Create(context.Background(), foreignNad, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - c := New(nadClient, f.NADInformer(), udnClient, f.UserDefinedNetworkInformer(), noopRenderNadStub(), f.PodCoreInformer()) - Expect(c.Run()).To(Succeed()) - - Eventually(func() []metav1.Condition { - udn, err = udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - return udn.Status.Conditions - }).ShouldNot(BeEmpty()) - - Expect(normalizeConditions(udn.Status.Conditions)).To(Equal([]metav1.Condition{{ - Type: "NetworkReady", - Status: "False", - Reason: "SyncError", - Message: "foreign NetworkAttachmentDefinition with the desired name already exist [test/test]", - 
}})) - }) - It("should reconcile mutated NAD", func() { - udn := testUDN() - udn, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - expectedNAD := testNAD() - - c := New(nadClient, f.NADInformer(), udnClient, f.UserDefinedNetworkInformer(), renderNadStub(expectedNAD), f.PodCoreInformer()) - Expect(c.Run()).To(Succeed()) + It("should fail when NAD render fail", func() { + udn := testUDN() + renderErr := errors.New("render NAD fails") + c = newTestController(failRenderNadStub(renderErr), udn) + Expect(c.Run()).To(Succeed()) + + Eventually(func() []metav1.Condition { + udn, err := cs.UserDefinedNetworkClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return normalizeConditions(udn.Status.Conditions) + }).Should(Equal([]metav1.Condition{{ + Type: "NetworkReady", + Status: "False", + Reason: "SyncError", + Message: "failed to generate NetworkAttachmentDefinition: " + renderErr.Error(), + }})) + + _, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + Expect(kerrors.IsNotFound(err)).To(BeTrue()) + }) + It("should fail when NAD create fail", func() { + udn := testUDN() + c = newTestController(noopRenderNadStub(), udn) + + expectedError := errors.New("create NAD error") + cs.NetworkAttchDefClient.(*netv1fakeclientset.Clientset).PrependReactor("create", "network-attachment-definitions", func(action testing.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, expectedError + }) + + Expect(c.Run()).To(Succeed()) + + Eventually(func() []metav1.Condition { + udn, err := cs.UserDefinedNetworkClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return normalizeConditions(udn.Status.Conditions) + }).Should(Equal([]metav1.Condition{{ + Type: "NetworkReady", + Status: "False", + Reason: "SyncError", + Message: "failed to create NetworkAttachmentDefinition: create NAD error", + }})) + + _, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + Expect(kerrors.IsNotFound(err)).To(BeTrue()) + }) - Eventually(func() []metav1.Condition { - udn, err = udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + It("should fail when foreign NAD exist", func() { + udn := testUDN() + foreignNad := testNAD() + foreignNad.ObjectMeta.OwnerReferences = nil + c = newTestController(noopRenderNadStub(), udn, foreignNad) + Expect(c.Run()).To(Succeed()) + + Eventually(func() []metav1.Condition { + udn, err := cs.UserDefinedNetworkClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return normalizeConditions(udn.Status.Conditions) + }).Should(Equal([]metav1.Condition{{ + Type: "NetworkReady", + Status: "False", + Reason: "SyncError", + Message: "foreign NetworkAttachmentDefinition with the desired name already exist [test/test]", + }})) + }) + It("should reconcile mutated NAD", func() { + udn := testUDN() + expectedNAD := testNAD() + c = newTestController(renderNadStub(expectedNAD), udn) + Expect(c.Run()).To(Succeed()) + + Eventually(func() []metav1.Condition { + udn, err := 
cs.UserDefinedNetworkClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return normalizeConditions(udn.Status.Conditions) + }).Should(Equal([]metav1.Condition{{ + Type: "NetworkReady", + Status: "True", + Reason: "NetworkAttachmentDefinitionReady", + Message: "NetworkAttachmentDefinition has been created", + }})) + + mutatedNAD := expectedNAD.DeepCopy() + mutatedNAD.Spec.Config = "MUTATED" + mutatedNAD, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Update(context.Background(), mutatedNAD, metav1.UpdateOptions{}) Expect(err).NotTo(HaveOccurred()) - return udn.Status.Conditions - }).ShouldNot(BeEmpty()) - Expect(normalizeConditions(udn.Status.Conditions)).To(Equal([]metav1.Condition{{ - Type: "NetworkReady", - Status: "True", - Reason: "NetworkAttachmentDefinitionReady", - Message: "NetworkAttachmentDefinition has been created", - }})) - - mutatedNAD := expectedNAD.DeepCopy() - p := []byte(`[{"op":"replace","path":"/spec/config","value":"MUTATED"}]`) - mutatedNAD, err = nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Patch(context.Background(), mutatedNAD.Name, types.JSONPatchType, p, metav1.PatchOptions{}) - Expect(err).NotTo(HaveOccurred()) - Eventually(func() *netv1.NetworkAttachmentDefinition { - updatedNAD, err := nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + Eventually(func() *netv1.NetworkAttachmentDefinition { + updatedNAD, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return updatedNAD + }).Should(Equal(expectedNAD)) + }) + It("should fail when update mutated NAD fails", func() { + udn := testUDN() + expectedNAD := testNAD() + c = newTestController(renderNadStub(expectedNAD), udn) + + expectedErr := errors.New("update error") + cs.NetworkAttchDefClient.(*netv1fakeclientset.Clientset).PrependReactor("update", "network-attachment-definitions", func(action testing.Action) (bool, runtime.Object, error) { + obj := action.(testing.UpdateAction).GetObject() + nad := obj.(*netv1.NetworkAttachmentDefinition) + if nad.Spec.Config == expectedNAD.Spec.Config { + return true, nil, expectedErr + } + return false, nad, nil + }) + + Expect(c.Run()).To(Succeed()) + + Eventually(func() []metav1.Condition { + udn, err := cs.UserDefinedNetworkClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return normalizeConditions(udn.Status.Conditions) + }).Should(Equal([]metav1.Condition{{ + Type: "NetworkReady", + Status: "True", + Reason: "NetworkAttachmentDefinitionReady", + Message: "NetworkAttachmentDefinition has been created", + }})) + actualNAD, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - return updatedNAD - }).Should(Equal(expectedNAD)) - }) - It("should fail when update mutated NAD fails", func() { - expectedNAD := testNAD() - - udn := testUDN() - udn, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + Expect(actualNAD).To(Equal(expectedNAD)) - c := New(nadClient, f.NADInformer(), 
udnClient, f.UserDefinedNetworkInformer(), renderNadStub(expectedNAD), f.PodCoreInformer()) - Expect(c.Run()).To(Succeed()) - - Eventually(func() []metav1.Condition { - udn, err = udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + mutatedNAD := expectedNAD.DeepCopy() + mutatedNAD.Spec.Config = "MUTATED" + mutatedNAD, err = cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Update(context.Background(), mutatedNAD, metav1.UpdateOptions{}) Expect(err).NotTo(HaveOccurred()) - return udn.Status.Conditions - }).ShouldNot(BeEmpty()) - Expect(normalizeConditions(udn.Status.Conditions)).To(Equal([]metav1.Condition{{ - Type: "NetworkReady", - Status: "True", - Reason: "NetworkAttachmentDefinitionReady", - Message: "NetworkAttachmentDefinition has been created", - }})) - - actualNAD, err := nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(actualNAD).To(Equal(expectedNAD)) - expectedErr := errors.New("update error") - nadClient.PrependReactor("update", "network-attachment-definitions", func(action testing.Action) (bool, runtime.Object, error) { - return true, nil, expectedErr + Eventually(func() []metav1.Condition { + udn, err = cs.UserDefinedNetworkClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return normalizeConditions(udn.Status.Conditions) + }).Should(Equal([]metav1.Condition{{ + Type: "NetworkReady", + Status: "False", + Reason: "SyncError", + Message: "failed to update NetworkAttachmentDefinition: " + expectedErr.Error(), + }})) + + Eventually(func() *netv1.NetworkAttachmentDefinition { + updatedNAD, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return updatedNAD + }).Should(Equal(mutatedNAD)) }) - mutatedNAD := expectedNAD.DeepCopy() - p := []byte(`[{"op":"replace","path":"/spec/config","value":"MUTATED"}]`) - mutatedNAD, err = nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Patch(context.Background(), mutatedNAD.Name, types.JSONPatchType, p, metav1.PatchOptions{}) - Expect(err).NotTo(HaveOccurred()) - - Eventually(func() []metav1.Condition { - udn, err = udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - return normalizeConditions(udn.Status.Conditions) - }).Should(Equal([]metav1.Condition{{ - Type: "NetworkReady", - Status: "False", - Reason: "SyncError", - Message: "failed to update NetworkAttachmentDefinition: " + expectedErr.Error(), - }})) - - Eventually(func() *netv1.NetworkAttachmentDefinition { - updatedNAD, err := nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - return updatedNAD - }).Should(Equal(mutatedNAD)) - }) + It("given primary UDN, should fail when primary NAD already exist", func() { + primaryUDN := testUDN() + primaryUDN.Spec.Topology = udnv1.NetworkTopologyLayer2 + primaryUDN.Spec.Layer2 = &udnv1.Layer2Config{Role: udnv1.NetworkRolePrimary} + + primaryNAD := primaryNetNAD() + c = newTestController(noopRenderNadStub(), primaryUDN, primaryNAD) + Expect(c.Run()).To(Succeed()) + + 
Eventually(func() []metav1.Condition { + updatedUDN, err := cs.UserDefinedNetworkClient.K8sV1().UserDefinedNetworks(primaryUDN.Namespace).Get(context.Background(), primaryUDN.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return normalizeConditions(updatedUDN.Status.Conditions) + }).Should(Equal([]metav1.Condition{{ + Type: "NetworkReady", + Status: "False", + Reason: "SyncError", + Message: `primary network already exist in namespace "test": "primary-net-1"`, + }})) + }) + It("given primary UDN, should fail when unmarshal primary NAD fails", func() { + primaryUDN := testUDN() + primaryUDN.Spec.Topology = udnv1.NetworkTopologyLayer3 + primaryUDN.Spec.Layer3 = &udnv1.Layer3Config{Role: udnv1.NetworkRolePrimary} + + primaryNAD := primaryNetNAD() + primaryNAD.Name = "another-primary-net" + primaryNAD.Spec.Config = "!@#$" + c = newTestController(noopRenderNadStub(), primaryUDN, primaryNAD) + Expect(c.Run()).To(Succeed()) + + Eventually(func() []metav1.Condition { + updatedUDN, err := cs.UserDefinedNetworkClient.K8sV1().UserDefinedNetworks(primaryUDN.Namespace).Get(context.Background(), primaryUDN.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return normalizeConditions(updatedUDN.Status.Conditions) + }).Should(Equal([]metav1.Condition{{ + Type: "NetworkReady", + Status: "False", + Reason: "SyncError", + Message: `failed to validate no primary network exist: unmarshal failed [test/another-primary-net]: invalid character '!' looking for beginning of value`, + }})) + }) - It("given primary UDN, should fail when primary NAD already exist", func() { - targetNs := "test" + It("should add finalizer to UDN", func() { + udn := testUDN() + udn.Finalizers = nil + c = newTestController(noopRenderNadStub(), udn) + Expect(c.Run()).To(Succeed()) + + Eventually(func() []string { + udn, err := cs.UserDefinedNetworkClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return udn.Finalizers + }).Should(Equal([]string{"k8s.ovn.org/user-defined-network-protection"})) + }) + It("should fail when add finalizer to UDN fails", func() { + udn := testUDN() + udn.Finalizers = nil + c = newTestController(noopRenderNadStub(), udn) + + expectedErr := errors.New("update UDN error") + cs.UserDefinedNetworkClient.(*udnfakeclient.Clientset).PrependReactor("update", "userdefinednetworks", func(action testing.Action) (handled bool, obj runtime.Object, err error) { + return true, nil, expectedErr + }) + + Expect(c.Run()).To(Succeed()) + + Eventually(func() []metav1.Condition { + updatedUDN, err := cs.UserDefinedNetworkClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return normalizeConditions(updatedUDN.Status.Conditions) + }).Should(Equal([]metav1.Condition{{ + Type: "NetworkReady", + Status: "False", + Reason: "SyncError", + Message: `failed to add finalizer to UserDefinedNetwork: ` + expectedErr.Error(), + }})) + }) - primaryNAD := primaryNetNAD() - primaryNAD, err := nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(targetNs).Create(context.Background(), primaryNAD, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + It("when UDN is being deleted, NAD exist, 2 pods using UDN, should delete NAD once no pod uses the network", func() { + var err error + nad := testNAD() + udn := testUDN() + udn.SetDeletionTimestamp(&metav1.Time{Time: time.Now()}) - primaryUDN := testUDN() - 
primaryUDN.Spec.Topology = udnv1.NetworkTopologyLayer2 - primaryUDN.Spec.Layer2 = &udnv1.Layer2Config{Role: udnv1.NetworkRolePrimary} - primaryUDN, err = udnClient.K8sV1().UserDefinedNetworks(targetNs).Create(context.Background(), primaryUDN, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + testOVNPodAnnot := map[string]string{util.OvnPodAnnotationName: `{"default": {"role":"primary"}, "test/test": {"role": "secondary"}}`} + pod1 := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod-1", Namespace: udn.Namespace, Annotations: testOVNPodAnnot}} + pod2 := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod-2", Namespace: udn.Namespace, Annotations: testOVNPodAnnot}} - c := New(nadClient, f.NADInformer(), udnClient, f.UserDefinedNetworkInformer(), noopRenderNadStub(), f.PodCoreInformer()) - Expect(c.Run()).To(Succeed()) + c = newTestController(renderNadStub(nad), udn, nad, pod1, pod2) + // use a short interval to make the controller re-enqueue requests + c.networkInUseRequeueInterval = 50 * time.Millisecond + Expect(c.Run()).To(Succeed()) - Eventually(func() []metav1.Condition { - updatedUDN, err := udnClient.K8sV1().UserDefinedNetworks(primaryUDN.Namespace).Get(context.Background(), primaryUDN.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - return normalizeConditions(updatedUDN.Status.Conditions) - }).Should(Equal([]metav1.Condition{{ - Type: "NetworkReady", - Status: "False", - Reason: "SyncError", - Message: `primary network already exist in namespace "test": "primary-net-1"`, - }})) - }) - It("given primary UDN, should fail when unmarshal primary NAD fails", func() { - targetNs := "test" + assertFinalizersPresent(cs.UserDefinedNetworkClient, cs.NetworkAttchDefClient, udn, pod1, pod2) - primaryNAD := primaryNetNAD() - primaryNAD.Name = "another-primary-net" - primaryNAD.Spec.Config = "!@#$" - primaryNAD, err := nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(targetNs).Create(context.Background(), primaryNAD, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + Expect(cs.KubeClient.CoreV1().Pods(udn.Namespace).Delete(context.Background(), pod1.Name, metav1.DeleteOptions{})).To(Succeed()) - primaryUDN := testUDN() - primaryUDN.Spec.Topology = udnv1.NetworkTopologyLayer3 - primaryUDN.Spec.Layer3 = &udnv1.Layer3Config{Role: udnv1.NetworkRolePrimary} - primaryUDN, err = udnClient.K8sV1().UserDefinedNetworks(targetNs).Create(context.Background(), primaryUDN, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + assertFinalizersPresent(cs.UserDefinedNetworkClient, cs.NetworkAttchDefClient, udn, pod2) - c := New(nadClient, f.NADInformer(), udnClient, f.UserDefinedNetworkInformer(), noopRenderNadStub(), f.PodCoreInformer()) - Expect(c.Run()).To(Succeed()) + Expect(cs.KubeClient.CoreV1().Pods(udn.Namespace).Delete(context.Background(), pod2.Name, metav1.DeleteOptions{})).To(Succeed()) - Eventually(func() []metav1.Condition { - updatedUDN, err := udnClient.K8sV1().UserDefinedNetworks(primaryUDN.Namespace).Get(context.Background(), primaryUDN.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - return normalizeConditions(updatedUDN.Status.Conditions) - }).Should(Equal([]metav1.Condition{{ - Type: "NetworkReady", - Status: "False", - Reason: "SyncError", - Message: `failed to validate no primary network exist: unmarshal failed [test/another-primary-net]: invalid character '!'
looking for beginning of value`, - }})) + Eventually(func() []string { + udn, err := cs.UserDefinedNetworkClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return udn.Finalizers + }).Should(BeEmpty(), "should remove finalizer on UDN following deletion and not being used") + nad, err = cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(nad.Namespace).Get(context.Background(), nad.Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + Expect(kerrors.IsNotFound(err)).To(BeTrue()) + }) }) - It("should add finalizer to UDN", func() { - udn := testUDN() - udn.Finalizers = nil - udn, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - c := New(nadClient, f.NADInformer(), udnClient, f.UserDefinedNetworkInformer(), noopRenderNadStub(), f.PodCoreInformer()) - Expect(c.Run()).To(Succeed()) - - Eventually(func() []string { - udn, err = udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - return udn.Finalizers - }).Should(Equal([]string{"k8s.ovn.org/user-defined-network-protection"})) - }) - It("should fail when add finalizer to UDN fails", func() { - udn := testUDN() - udn.Finalizers = nil - udn, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + Context("reconcile CUDN CR", func() { + It("should create NAD according to spec in each namespace that applies to namespace selector", func() { + testNamespaces := []string{"red", "blue"} + var objs []runtime.Object + for _, nsName := range testNamespaces { + objs = append(objs, testNamespace(nsName)) + } + cudn := testClusterUDN("test", testNamespaces...) + cudn.Spec.Network = udnv1.NetworkSpec{Topology: udnv1.NetworkTopologyLayer2, Layer2: &udnv1.Layer2Config{}} + objs = append(objs, cudn) + + c = newTestController(template.RenderNetAttachDefManifest, objs...) + Expect(c.Run()).To(Succeed()) + + expectedNsNADs := map[string]*netv1.NetworkAttachmentDefinition{} + for _, nsName := range testNamespaces { + nad := testClusterUdnNAD(cudn.Name, nsName) + networkName := "cluster.udn." 
+ cudn.Name + nadName := nsName + "/" + cudn.Name + nad.Spec.Config = `{"cniVersion":"1.0.0","name":"` + networkName + `","netAttachDefName":"` + nadName + `","role":"","topology":"layer2","type":"ovn-k8s-cni-overlay"}` + expectedNsNADs[nsName] = nad + } - expectedErr := errors.New("update UDN error") - udnClient.PrependReactor("update", "userdefinednetworks", func(action testing.Action) (handled bool, obj runtime.Object, err error) { - return true, nil, expectedErr + Eventually(func() []metav1.Condition { + var err error + cudn, err = cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Get(context.Background(), cudn.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return normalizeConditions(cudn.Status.Conditions) + }).Should(Equal([]metav1.Condition{{ + Type: "NetworkReady", + Status: "True", + Reason: "NetworkAttachmentDefinitionReady", + Message: "NetworkAttachmentDefinition has been created in following namespaces: [blue, red]", + }}), "status should reflect NAD exist in test namespaces") + for testNamespace, expectedNAD := range expectedNsNADs { + actualNAD, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(testNamespace).Get(context.Background(), cudn.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actualNAD).To(Equal(expectedNAD), "NAD should exist in test namespaces") + } }) - c := New(nadClient, f.NADInformer(), udnClient, f.UserDefinedNetworkInformer(), noopRenderNadStub(), f.PodCoreInformer()) - Expect(c.Run()).To(Succeed()) - - Eventually(func() []metav1.Condition { - updatedUDN, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - return normalizeConditions(updatedUDN.Status.Conditions) - }).Should(Equal([]metav1.Condition{{ - Type: "NetworkReady", - Status: "False", - Reason: "SyncError", - Message: `failed to add finalizer to UserDefinedNetwork: ` + expectedErr.Error(), - }})) - }) - - It("when UDN is being deleted, NAD exist, 2 pods using UDN, should remove finalizers once no pod uses the network", func() { - udn := testsUDNWithDeletionTimestamp(time.Now()) - udn, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + When("CR exist, and few connected & disconnected namespaces", func() { + const ( + cudnName = "global-network" + testLabelKey = "test.io" + testLabelValue = "emea" + ) + var connectedNsNames []string + var disconnectedNsNames []string + + BeforeEach(func() { + var testObjs []runtime.Object + By("create test namespaces") + disconnectedNsNames = []string{"red", "blue"} + for _, nsName := range disconnectedNsNames { + testObjs = append(testObjs, testNamespace(nsName)) + } + By("create test namespaces with tests label") + connectedNsNames = []string{"green", "yellow"} + testLabelEmea := map[string]string{testLabelKey: testLabelValue} + for _, nsName := range connectedNsNames { + ns := testNamespace(nsName) + ns.Labels = testLabelEmea + testObjs = append(testObjs, ns) + } + By("create CUDN selecting namespaces with test label") + cudn := testClusterUDN(cudnName) + cudn.Spec = udnv1.ClusterUserDefinedNetworkSpec{NamespaceSelector: metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: testLabelKey, + Operator: metav1.LabelSelectorOpIn, + Values: []string{testLabelValue}, + }}}} + testObjs = append(testObjs, cudn) + + By("start test controller") + c = 
newTestController(renderNadStub(testClusterUdnNAD(cudnName, "")), testObjs...) + // use a short interval to make the controller re-enqueue requests when the network is in use + c.networkInUseRequeueInterval = 50 * time.Millisecond + Expect(c.Run()).To(Succeed()) + + Eventually(func() []metav1.Condition { + var err error + cudn, err = cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Get(context.Background(), cudn.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return normalizeConditions(cudn.Status.Conditions) + }).Should(Equal([]metav1.Condition{{ + Type: "NetworkReady", + Status: "True", + Reason: "NetworkAttachmentDefinitionReady", + Message: "NetworkAttachmentDefinition has been created in following namespaces: [green, yellow]", + }}), "status should report NAD created in test labeled namespaces") + for _, nsName := range connectedNsNames { + nads, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(nsName).List(context.Background(), metav1.ListOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nads.Items).To(Equal([]netv1.NetworkAttachmentDefinition{*testClusterUdnNAD(cudnName, nsName)}), + "NAD should exist in test labeled namespaces") + } + }) + + It("should reconcile mutated NADs", func() { + for _, nsName := range connectedNsNames { + p := []byte(`[{"op":"replace","path":"/spec/config","value":"MUTATED"}]`) + nad, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(nsName).Patch(context.Background(), cudnName, types.JSONPatchType, p, metav1.PatchOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nad.Spec.Config).To(Equal("MUTATED")) + } - nad, err := nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Create(context.Background(), testNAD(), metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + for _, nsName := range connectedNsNames { + Eventually(func() *netv1.NetworkAttachmentDefinition { + nad, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(nsName).Get(context.Background(), cudnName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return nad + }).Should(Equal(testClusterUdnNAD(cudnName, nsName))) + } + })
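Reviewer note: every selector-driven case in this context funnels through `metav1.LabelSelectorAsSelector`, the same conversion `getSelectedNamespaces` performs. A self-contained example of how a `MatchExpressions` requirement evaluates namespace label sets (the label key and values match the tests; everything else is illustrative):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Same shape as the CUDN namespace selector used in these tests.
	sel := metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{{
		Key:      "test.io",
		Operator: metav1.LabelSelectorOpIn,
		Values:   []string{"emea", "us"},
	}}}
	s, err := metav1.LabelSelectorAsSelector(&sel)
	if err != nil {
		panic(err)
	}
	fmt.Println(s.Matches(labels.Set{"test.io": "emea"})) // true: namespace is selected
	fmt.Println(s.Matches(labels.Set{"test.io": "apac"})) // false: its NAD would be removed as stale
}
```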
+ + It("when CR selector has selection added, should create NAD in matching namespaces", func() { + By("create new test namespaces with new selection label") + newNsLabelValue := "us" + newNsLabel := map[string]string{testLabelKey: newNsLabelValue} + newNsNames := []string{"black", "gray"} + for _, nsName := range newNsNames { + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: nsName, Labels: newNsLabel}} + ns, err := cs.KubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + } - podMeta := metav1.ObjectMeta{ - Namespace: udn.Namespace, - Annotations: map[string]string{util.OvnPodAnnotationName: `{"default": {"role":"primary"}, "test/test": {"role": "secondary"}}`}, - } + By("add new label to CR namespace-selector") + cudn, err := cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Get(context.Background(), cudnName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + cudn.Spec.NamespaceSelector.MatchExpressions[0].Values = append(cudn.Spec.NamespaceSelector.MatchExpressions[0].Values, newNsLabelValue) + cudn, err = cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Update(context.Background(), cudn, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cudn.Spec.NamespaceSelector.MatchExpressions).To(Equal([]metav1.LabelSelectorRequirement{{ + Key: testLabelKey, + Operator: metav1.LabelSelectorOpIn, + Values: []string{testLabelValue, newNsLabelValue}, + }})) + + Eventually(func() []metav1.Condition { + var err error + cudn, err = cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Get(context.Background(), cudnName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return normalizeConditions(cudn.Status.Conditions) + }).Should(Equal([]metav1.Condition{{ + Type: "NetworkReady", + Status: "True", + Reason: "NetworkAttachmentDefinitionReady", + Message: "NetworkAttachmentDefinition has been created in following namespaces: [black, gray, green, yellow]", + }}), "status should report NAD exist in existing and new labeled namespaces") + for _, nsName := range append(connectedNsNames, newNsNames...) { + nads, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(nsName).List(context.Background(), metav1.ListOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nads.Items).To(Equal([]netv1.NetworkAttachmentDefinition{*testClusterUdnNAD(cudnName, nsName)}), + "NAD should exist in existing and new labeled namespaces") + } + }) + + It("when CR selector has selection removed, should delete stale NADs in previously matching namespaces", func() { + By("remove test label value from namespace-selector") + cudn, err := cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Get(context.Background(), cudnName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + cudn.Spec.NamespaceSelector.MatchExpressions[0].Values = []string{""} + cudn, err = cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Update(context.Background(), cudn, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cudn.Spec.NamespaceSelector.MatchExpressions).To(Equal([]metav1.LabelSelectorRequirement{{ + Key: testLabelKey, Operator: metav1.LabelSelectorOpIn, Values: []string{""}, + }})) + + Eventually(func() []metav1.Condition { + cudn, err := cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Get(context.Background(), cudnName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return normalizeConditions(cudn.Status.Conditions) + }).Should(Equal([]metav1.Condition{{ + Type: "NetworkReady", + Status: "True", + Reason: "NetworkAttachmentDefinitionReady", + Message: "NetworkAttachmentDefinition has been created in following namespaces: []", + }})) + for _, nsName := range connectedNsNames { + nads, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(nsName).List(context.Background(), metav1.ListOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nads.Items).To(BeEmpty(), + "stale NADs should not exist in previously matching namespaces") + } + }) + + It("when CR is being deleted, NADs used by pods, should not remove finalizers until no pod uses the network", func() { + var testPods []corev1.Pod + for _, nsName := range connectedNsNames { + pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + Namespace: nsName, + Annotations: map[string]string{util.OvnPodAnnotationName: `{"default": {"role":"primary"}, "` + nsName + `/` + cudnName + `": {"role": "secondary"}}`}}, + } + pod, err := cs.KubeClient.CoreV1().Pods(nsName).Create(context.Background(), pod, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + testPods = append(testPods, *pod) + }
corev1.Pod{ObjectMeta: podMeta} - pod.Name = fmt.Sprintf("pod-%d", i) - Expect(podInf.Informer().GetIndexer().Add(&pod)).Should(Succeed()) - testPods = append(testPods, pod) - } + By("mark CR for deletion") + p := fmt.Sprintf(`[{"op": "replace", "path": "./metadata/deletionTimestamp", "value": %q }]`, "2024-01-01T00:00:00Z") + cudn, err := cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Patch(context.Background(), cudnName, types.JSONPatchType, []byte(p), metav1.PatchOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cudn.DeletionTimestamp.IsZero()).To(BeFalse()) - c := New(nadClient, f.NADInformer(), udnClient, f.UserDefinedNetworkInformer(), renderNadStub(nad), podInf) - // user short interval to make the controller re-enqueue requests - c.networkInUseRequeueInterval = 50 * time.Millisecond - Expect(c.Run()).To(Succeed()) + expectedMessageNADPods := map[string]string{ + "green/global-network": "green/pod-0", + "yellow/global-network": "yellow/pod-0", + } + Eventually(func(g Gomega) { + var err error + cudn, err = cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Get(context.Background(), cudnName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + // specify Gomega in order to tolerate errors until timeout + g.Expect(assertConditionReportNetworkInUse(cudn.Status.Conditions, expectedMessageNADPods)).To(Succeed()) + }).Should(Succeed()) + Expect(cudn.Finalizers).To(Equal([]string{"k8s.ovn.org/user-defined-network-protection"}), + "should not remove finalizer from CR when being used by pods") + + remainingPod := &testPods[0] + podToDelete := testPods[1:] + + By("delete pod, leaving one pod in single target namespace") + for _, pod := range podToDelete { + Expect(cs.KubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{})).ToNot(HaveOccurred()) + } - assertFinalizersPresent(udnClient, nadClient, udn, testPods...) 
+ remainingPodKey := fmt.Sprintf("%s/%s", remainingPod.Namespace, remainingPod.Name) + remainingNADKey := fmt.Sprintf("%s/%s", remainingPod.Namespace, cudnName) + remainingNADPod := map[string]string{remainingNADKey: remainingPodKey} + Eventually(func(g Gomega) { + var err error + cudn, err = cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Get(context.Background(), cudnName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + // specify Gomega making eventually tolerate error until timeout + g.Expect(assertConditionReportNetworkInUse(cudn.Status.Conditions, remainingNADPod)).To(Succeed()) + }).Should(Succeed()) + Expect(cudn.Finalizers).To(Equal([]string{"k8s.ovn.org/user-defined-network-protection"}), + "should not remove finalizer from CR when being used by pods") + + By("delete remaining pod") + Expect(cs.KubeClient.CoreV1().Pods(remainingPod.Namespace).Delete(context.Background(), remainingPod.Name, metav1.DeleteOptions{})).ToNot(HaveOccurred()) + + Eventually(func() []string { + cudn, err := cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Get(context.Background(), cudnName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return cudn.Finalizers + }).Should(BeEmpty(), "should remove finalizer from CR when no pod uses the network") + }) + + It("when new namespace is created with matching label, should create NAD in newly created namespaces", func() { + By("create new namespaces with test label") + newNsNames := []string{"black", "gray"} + for _, nsName := range newNsNames { + ns := testNamespace(nsName) + ns.Labels = map[string]string{testLabelKey: testLabelValue} + ns, err := cs.KubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + } - Expect(podInf.Informer().GetIndexer().Delete(&testPods[0])).To(Succeed()) + Eventually(func() []metav1.Condition { + cudn, err := cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Get(context.Background(), cudnName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return normalizeConditions(cudn.Status.Conditions) + }).Should(Equal([]metav1.Condition{{ + Type: "NetworkReady", + Status: "True", + Reason: "NetworkAttachmentDefinitionReady", + Message: "NetworkAttachmentDefinition has been created in following namespaces: [black, gray, green, yellow]", + }}), "status should report NAD created in existing and new test namespaces") + for _, nsName := range append(connectedNsNames, newNsNames...) 
{ + nads, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(nsName).List(context.Background(), metav1.ListOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nads.Items).To(Equal([]netv1.NetworkAttachmentDefinition{*testClusterUdnNAD(cudnName, nsName)}), "NAD should exist in existing and new test namespaces") + } + }) + + It("when existing namespace is labeled with matching label, should create NAD in newly labeled matching namespaces", func() { + By("add test label to the disconnected test namespaces") + for _, nsName := range disconnectedNsNames { + p := fmt.Sprintf(`[{"op": "add", "path": "./metadata/labels", "value": {%q: %q}}]`, testLabelKey, testLabelValue) + ns, err := cs.KubeClient.CoreV1().Namespaces().Patch(context.Background(), nsName, types.JSONPatchType, []byte(p), metav1.PatchOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(ns.Labels).To(Equal(map[string]string{testLabelKey: testLabelValue})) + } - Eventually(func() []metav1.Condition { + cudn, err := cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Get(context.Background(), cudnName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return normalizeConditions(cudn.Status.Conditions) + }).Should(Equal([]metav1.Condition{{ + Type: "NetworkReady", + Status: "True", + Reason: "NetworkAttachmentDefinitionReady", + Message: "NetworkAttachmentDefinition has been created in following namespaces: [blue, green, red, yellow]", + }}), "status should report NAD created in existing and new test namespaces") + for _, nsName := range append(connectedNsNames, disconnectedNsNames...) { + nads, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(nsName).List(context.Background(), metav1.ListOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nads.Items).To(Equal([]netv1.NetworkAttachmentDefinition{*testClusterUdnNAD(cudnName, nsName)}), "NAD should exist in existing and new test namespaces") + } + })
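Reviewer note: the deletion-blocking behavior verified around these cases keys off the OVN pod annotation (`util.OvnPodAnnotationName`, i.e. `k8s.ovn.org/pod-networks`), whose keys name the networks a pod is attached to. A simplified sketch of an in-use check over that annotation; this is not the repository's exact `NetAttachDefNotInUse` logic:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// nadInUse reports whether a pod's pod-networks annotation references
// the given NAD key ("<namespace>/<name>").
func nadInUse(podAnnotation, nadKey string) (bool, error) {
	networks := map[string]json.RawMessage{}
	if err := json.Unmarshal([]byte(podAnnotation), &networks); err != nil {
		return false, fmt.Errorf("unmarshal pod-networks annotation: %w", err)
	}
	_, used := networks[nadKey]
	return used, nil
}

func main() {
	annot := `{"default": {"role":"primary"}, "green/global-network": {"role": "secondary"}}`
	used, _ := nadInUse(annot, "green/global-network")
	fmt.Println(used) // true: the NAD cannot be deleted yet
}
```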
+ + It("when existing namespace's matching label removed, should delete stale NADs in previously matching namespaces", func() { + connectedNsName := connectedNsNames[0] + staleNADNsNames := connectedNsNames[1:] + + By("remove label from a few connected namespaces") + for _, nsName := range staleNADNsNames { + p := `[{"op": "replace", "path": "./metadata/labels", "value": {}}]` + ns, err := cs.KubeClient.CoreV1().Namespaces().Patch(context.Background(), nsName, types.JSONPatchType, []byte(p), metav1.PatchOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(ns.Labels).To(BeEmpty()) + } - Eventually(func() []metav1.Condition { + cudn, err := cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Get(context.Background(), cudnName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return normalizeConditions(cudn.Status.Conditions) + }).Should(Equal([]metav1.Condition{{ + Type: "NetworkReady", + Status: "True", + Reason: "NetworkAttachmentDefinitionReady", + Message: "NetworkAttachmentDefinition has been created in following namespaces: [" + connectedNsName + "]", + }}), "status should report NAD created in labeled namespace only") + + nads, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(connectedNsName).List(context.Background(), metav1.ListOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nads.Items).To(Equal([]netv1.NetworkAttachmentDefinition{*testClusterUdnNAD(cudnName, connectedNsName)}), + "NAD should exist in matching namespaces only") + + for _, nsName := range staleNADNsNames { + nads, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(nsName).List(context.Background(), metav1.ListOptions{}) + Expect(err).ToNot(HaveOccurred()) + Expect(nads.Items).To(BeEmpty(), "no NAD should exist in non-matching namespaces") + } + }) + }) - Eventually(func() []string { - udn, err = udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - return udn.Finalizers - }).Should(BeEmpty(), "should remove finalizer on UDN following deletion and not being used") - nad, err = nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(nad.Namespace).Get(context.Background(), nad.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(nad.Finalizers).To(BeEmpty(), "should remove finalizer on NAD following deletion and not being used") + It("when started, CR exist, stale NADs exist, should delete stale NADs", func() { + var testObjs []runtime.Object + staleNADsNsNames := []string{"red", "blue"} + staleLabel := map[string]string{"test.io": "stale"} + for _, nsName := range staleNADsNsNames { + ns := testNamespace(nsName) + ns.SetLabels(staleLabel) + testObjs = append(testObjs, ns) + } + connectedNsNames := []string{"green", "yellow"} + connectedLabel := map[string]string{"test.io": "connected"} + for _, nsName := range connectedNsNames { + ns := testNamespace(nsName) + ns.SetLabels(connectedLabel) + testObjs = append(testObjs, ns) + } + cudn := testClusterUDN("test") + cudn.Spec = udnv1.ClusterUserDefinedNetworkSpec{NamespaceSelector: metav1.LabelSelector{ + MatchLabels: connectedLabel, + }} + testObjs = append(testObjs, cudn) + for _, nsName := range append(staleNADsNsNames, connectedNsNames...) { + testObjs = append(testObjs, testClusterUdnNAD(cudn.Name, nsName)) + } + c = newTestController(renderNadStub(testClusterUdnNAD(cudn.Name, "")), testObjs...)
+ Expect(c.Run()).Should(Succeed()) + + Eventually(func() []metav1.Condition { + var err error + cudn, err = cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Get(context.Background(), cudn.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return normalizeConditions(cudn.Status.Conditions) + }, 50*time.Millisecond).Should(Equal([]metav1.Condition{{ + Type: "NetworkReady", + Status: "True", + Reason: "NetworkAttachmentDefinitionReady", + Message: "NetworkAttachmentDefinition has been created in following namespaces: [green, yellow]", + }}), "status should report NAD created in test labeled namespaces") + + for _, nsName := range staleNADsNsNames { + _, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(nsName).Get(context.Background(), cudn.Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + Expect(kerrors.IsNotFound(err)).To(Equal(true)) + } + }) }) }) Context("UserDefinedNetwork object sync", func() { It("should fail when NAD owner-reference is malformed", func() { udn := testUDN() - udn, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - nad := testNAD() - c := New(nadClient, nadInformer, udnClient, udnInformer, renderNadStub(nad), nil) - - mutetedNAD := nad.DeepCopy() - mutetedNAD.ObjectMeta.OwnerReferences = []metav1.OwnerReference{{Kind: "DifferentKind"}} - mutetedNAD, err = nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Create(context.Background(), mutetedNAD, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + mutatedNAD := testNAD() + mutatedNAD.ObjectMeta.OwnerReferences = []metav1.OwnerReference{{Kind: "DifferentKind"}} + c := newTestController(noopRenderNadStub(), udn, mutatedNAD) - _, err = c.syncUserDefinedNetwork(udn, mutetedNAD) + _, err := c.syncUserDefinedNetwork(udn) Expect(err).To(Equal(errors.New("foreign NetworkAttachmentDefinition with the desired name already exist [test/test]"))) }) It("when UDN is being deleted, should not remove finalizer from non managed NAD", func() { - c := New(nadClient, nadInformer, udnClient, udnInformer, noopRenderNadStub(), podInformer) - udn := testsUDNWithDeletionTimestamp(time.Now()) - udn, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - unmanagedNAD := testNAD() unmanagedNAD.OwnerReferences[0].UID = "99" - unmanagedNAD, err = nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Create(context.Background(), unmanagedNAD, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + c := newTestController(noopRenderNadStub(), udn, unmanagedNAD) - _, err = c.syncUserDefinedNetwork(udn, unmanagedNAD) + _, err := c.syncUserDefinedNetwork(udn) Expect(err).ToNot(HaveOccurred()) + unmanagedNAD, err = cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Get(context.Background(), unmanagedNAD.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) expectedFinalizers := testNAD().Finalizers Expect(unmanagedNAD.Finalizers).To(Equal(expectedFinalizers)) }) - It("when UDN is being deleted, and NAD exist, should remove finalizer from NAD", func() { - c := New(nadClient, nadInformer, udnClient, udnInformer, noopRenderNadStub(), podInformer) + It("when UDN is being deleted, and NAD exist, should delete NAD", func() { udn := testsUDNWithDeletionTimestamp(time.Now()) - udn, err := 
udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - nad := testNAD() - nad, err = nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Create(context.Background(), nad, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + c := newTestController(noopRenderNadStub(), udn, nad) - _, err = c.syncUserDefinedNetwork(udn, nad) + _, err := c.syncUserDefinedNetwork(udn) Expect(err).ToNot(HaveOccurred()) - Expect(nad.Finalizers).To(BeEmpty()) + + nad, err = cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Get(context.Background(), nad.Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + Expect(kerrors.IsNotFound(err)).To(BeTrue()) }) It("when UDN is being deleted, and NAD exist, should fail when remove NAD finalizer fails", func() { - c := New(nadClient, nadInformer, udnClient, udnInformer, noopRenderNadStub(), podInformer) - udn := testsUDNWithDeletionTimestamp(time.Now()) - udn, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - nad := testNAD() - nad, err = nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Create(context.Background(), nad, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + c := newTestController(noopRenderNadStub(), udn, nad) expectedErr := errors.New("update NAD error") - nadClient.PrependReactor("update", "network-attachment-definitions", func(action testing.Action) (bool, runtime.Object, error) { + cs.NetworkAttchDefClient.(*netv1fakeclientset.Clientset).PrependReactor("update", "network-attachment-definitions", func(action testing.Action) (bool, runtime.Object, error) { return true, nil, expectedErr }) - _, err = c.syncUserDefinedNetwork(udn, nad) + _, err := c.syncUserDefinedNetwork(udn) Expect(err).To(MatchError(expectedErr)) }) It("when UDN is being deleted, and NAD exist w/o finalizer, should remove finalizer from UDN", func() { - c := New(nadClient, nadInformer, udnClient, udnInformer, noopRenderNadStub(), podInformer) - udn := testsUDNWithDeletionTimestamp(time.Now()) - udn, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - nad := testNAD() nad.Finalizers = nil - nad, err = nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Create(context.Background(), nad, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + c := newTestController(noopRenderNadStub(), udn, nad) - _, err = c.syncUserDefinedNetwork(udn, nad) + _, err := c.syncUserDefinedNetwork(udn) Expect(err).ToNot(HaveOccurred()) Expect(udn.Finalizers).To(BeEmpty()) }) It("when UDN is being deleted, and NAD not exist, should remove finalizer from UDN", func() { - c := New(nadClient, nadInformer, udnClient, udnInformer, noopRenderNadStub(), podInformer) - udn := testsUDNWithDeletionTimestamp(time.Now()) - udn, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + c := newTestController(noopRenderNadStub(), udn) - _, err = c.syncUserDefinedNetwork(udn, nil) + _, err := c.syncUserDefinedNetwork(udn) Expect(err).ToNot(HaveOccurred()) Expect(udn.Finalizers).To(BeEmpty()) }) It("when UDN is being deleted, should fail removing finalizer from UDN when patch fails", func() { - c := 
New(nadClient, nadInformer, udnClient, udnInformer, noopRenderNadStub(), podInformer) - udn := testsUDNWithDeletionTimestamp(time.Now()) - udn, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - nad := testNAD() nad.Finalizers = nil - nad, err = nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Create(context.Background(), nad, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + c := newTestController(noopRenderNadStub(), udn, nad) expectedErr := errors.New("update UDN error") - udnClient.PrependReactor("update", "userdefinednetworks", func(action testing.Action) (bool, runtime.Object, error) { + cs.UserDefinedNetworkClient.(*udnfakeclient.Clientset).PrependReactor("update", "userdefinednetworks", func(action testing.Action) (bool, runtime.Object, error) { return true, nil, expectedErr }) - _, err = c.syncUserDefinedNetwork(udn, nad) + _, err := c.syncUserDefinedNetwork(udn) Expect(err).To(MatchError(expectedErr)) }) - It("when UDN is being deleted, NAD exist, pod exist, should remove finalizers when network not being used", func() { + It("when UDN is being deleted, NAD exists, pod exists, should delete NAD when the network is not being used", func() { udn := testsUDNWithDeletionTimestamp(time.Now()) - udn, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - nad := testNAD() - nad, err = nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Create(context.Background(), nad, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: udn.Namespace, @@ -557,46 +840,42 @@ var _ = Describe("User Defined Network Controller", func() { }, }, } - Expect(podInformer.Informer().GetIndexer().Add(pod)).Should(Succeed()) - c := New(nadClient, nadInformer, udnClient, udnInformer, renderNadStub(nad), podInformer) + c := newTestController(renderNadStub(nad), udn, nad, pod) - nad, err = c.syncUserDefinedNetwork(udn, nad) + _, err := c.syncUserDefinedNetwork(udn) Expect(err).ToNot(HaveOccurred()) - Expect(nad.Finalizers).To(BeEmpty()) Expect(udn.Finalizers).To(BeEmpty()) + + nad, err = cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Get(context.Background(), nad.Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + Expect(kerrors.IsNotFound(err)).To(BeTrue()) }) DescribeTable("when UDN is being deleted, NAD exist, should not remove finalizers when", func(podOvnAnnotations map[string]string, expectedErr error) { + var objs []runtime.Object udn := testsUDNWithDeletionTimestamp(time.Now()) - Expect(udnInformer.Informer().GetIndexer().Add(udn)).To(Succeed()) - nad := testNAD() - Expect(nadInformer.Informer().GetIndexer().Add(nad)).To(Succeed()) - for podName, ovnAnnotValue := range podOvnAnnotations { - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, Namespace: udn.Namespace, - Annotations: map[string]string{util.OvnPodAnnotationName: ovnAnnotValue}, - }, - } - Expect(podInformer.Informer().GetIndexer().Add(pod)).Should(Succeed()) + objs = append(objs, &corev1.Pod{ObjectMeta: metav1.ObjectMeta{ + Name: podName, Namespace: udn.Namespace, + Annotations: map[string]string{util.OvnPodAnnotationName: ovnAnnotValue}, + }}) } + objs = append(objs, udn, nad) + c := newTestController(renderNadStub(nad), objs...)
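+ // Each table entry seeds pods whose OVN annotations reference the NAD; sync must keep the protection finalizers on both UDN and NAD while any such pod exists.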
- c := New(nadClient, nadInformer, udnClient, udnInformer, renderNadStub(nad), podInformer) - - _, err := c.syncUserDefinedNetwork(udn, nad) + _, err := c.syncUserDefinedNetwork(udn) Expect(err).To(MatchError(ContainSubstring(expectedErr.Error()))) - actual, _, err := nadInformer.Informer().GetIndexer().Get(nad) - Expect(err).NotTo(HaveOccurred()) - Expect(actual.(*netv1.NetworkAttachmentDefinition).Finalizers).To(Equal([]string{"k8s.ovn.org/user-defined-network-protection"}), + actual, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(udn.Namespace).Get(context.Background(), nad.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + Expect(actual.Finalizers).To(Equal([]string{"k8s.ovn.org/user-defined-network-protection"}), "finalizer should remain until no pod uses the network") - actualUDN, _, err := udnInformer.Informer().GetIndexer().Get(udn) - Expect(actualUDN.(*udnv1.UserDefinedNetwork).Finalizers).To(Equal([]string{"k8s.ovn.org/user-defined-network-protection"}), + actualUDN, err := cs.UserDefinedNetworkClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) + Expect(actualUDN.Finalizers).To(Equal([]string{"k8s.ovn.org/user-defined-network-protection"}), "finalizer should remain until no pod uses the network") Expect(err).NotTo(HaveOccurred()) }, @@ -629,14 +908,11 @@ var _ = Describe("User Defined Network Controller", func() { DescribeTable("should update status, when", func(nad *netv1.NetworkAttachmentDefinition, syncErr error, expectedStatus *udnv1.UserDefinedNetworkStatus) { udn := testUDN() - udn, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - c := New(nadClient, nadInformer, udnClient, udnInformer, noopRenderNadStub(), podInformer) + c := newTestController(noopRenderNadStub(), udn) Expect(c.updateUserDefinedNetworkStatus(udn, nad, syncErr)).To(Succeed(), "should update status successfully") - assertUserDefinedNetworkStatus(udnClient, udn, expectedStatus) + assertUserDefinedNetworkStatus(cs.UserDefinedNetworkClient, udn, expectedStatus) }, Entry("NAD exist", testNAD(), @@ -684,10 +960,7 @@ var _ = Describe("User Defined Network Controller", func() { It("should update status according to sync errors", func() { udn := testUDN() - udn, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Create(context.Background(), udn, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - c := New(nadClient, nadInformer, udnClient, udnInformer, noopRenderNadStub(), podInformer) + c := newTestController(noopRenderNadStub(), udn) nad := testNAD() syncErr := errors.New("sync error") @@ -703,7 +976,7 @@ var _ = Describe("User Defined Network Controller", func() { }, }, } - assertUserDefinedNetworkStatus(udnClient, udn, expectedStatus) + assertUserDefinedNetworkStatus(cs.UserDefinedNetworkClient, udn, expectedStatus) anotherSyncErr := errors.New("another sync error") Expect(c.updateUserDefinedNetworkStatus(udn, nad, anotherSyncErr)).To(Succeed(), "should update status successfully") @@ -718,14 +991,14 @@ var _ = Describe("User Defined Network Controller", func() { }, }, } - assertUserDefinedNetworkStatus(udnClient, udn, expectedUpdatedStatus) + assertUserDefinedNetworkStatus(cs.UserDefinedNetworkClient, udn, expectedUpdatedStatus) }) It("should fail when client update status fails", func() { - c := New(nadClient, nadInformer, udnClient, udnInformer, noopRenderNadStub(), podInformer) + c 
:= newTestController(noopRenderNadStub()) expectedError := errors.New("test err") - udnClient.PrependReactor("patch", "userdefinednetworks/status", func(action testing.Action) (bool, runtime.Object, error) { + cs.UserDefinedNetworkClient.(*udnfakeclient.Clientset).PrependReactor("patch", "userdefinednetworks/status", func(action testing.Action) (bool, runtime.Object, error) { return true, nil, expectedError }) @@ -734,9 +1007,240 @@ var _ = Describe("User Defined Network Controller", func() { Expect(c.updateUserDefinedNetworkStatus(udn, nad, nil)).To(MatchError(expectedError)) }) }) + + Context("ClusterUserDefinedNetwork object sync", func() { + It("should succeed given no CR", func() { + c := newTestController(noopRenderNadStub()) + _, err := c.syncClusterUDN(nil) + Expect(err).To(Not(HaveOccurred())) + }) + It("should succeed when no namespace matches the namespace-selector", func() { + cudn := testClusterUDN("test", "red") + c := newTestController(noopRenderNadStub(), cudn) + + nads, err := c.syncClusterUDN(cudn) + Expect(err).ToNot(HaveOccurred()) + Expect(nads).To(BeEmpty()) + }) + It("should add finalizer to CR", func() { + cudn := &udnv1.ClusterUserDefinedNetwork{Spec: udnv1.ClusterUserDefinedNetworkSpec{ + NamespaceSelector: metav1.LabelSelector{}}} + c := newTestController(noopRenderNadStub(), cudn) + + nads, err := c.syncClusterUDN(cudn) + Expect(err).ToNot(HaveOccurred()) + Expect(nads).To(BeEmpty()) + + cudn, err = cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Get(context.Background(), cudn.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + Expect(cudn.Finalizers).To(Equal([]string{"k8s.ovn.org/user-defined-network-protection"})) + }) + It("should fail when update NAD fails", func() { + expectedErr := errors.New("test err") + c := newTestController(failRenderNadStub(expectedErr), testNamespace("blue")) + + cudn := testClusterUDN("test", "blue") + + _, err := c.syncClusterUDN(cudn) + Expect(err).To(MatchError(expectedErr)) + }) + + It("when CR is deleted, CR has no finalizer, should succeed", func() { + deletedCUDN := testClusterUDN("test", "blue") + deletedCUDN.Finalizers = []string{} + deletedCUDN.DeletionTimestamp = &metav1.Time{Time: time.Now()} + c := newTestController(noopRenderNadStub(), deletedCUDN) + + nads, err := c.syncClusterUDN(deletedCUDN) + Expect(err).ToNot(HaveOccurred()) + Expect(nads).To(BeEmpty()) + }) + It("when CR is deleted, should remove finalizer from CR", func() { + deletedCUDN := testClusterUDN("test", "blue") + deletedCUDN.DeletionTimestamp = &metav1.Time{Time: time.Now()} + c := newTestController(noopRenderNadStub(), deletedCUDN) + + nads, err := c.syncClusterUDN(deletedCUDN) + Expect(err).ToNot(HaveOccurred()) + Expect(nads).To(BeEmpty()) + Expect(deletedCUDN.Finalizers).To(BeEmpty()) + }) + Context("CR is being deleted, associated NADs exist", func() { + const testNsName = "blue" + var c *Controller + var cudn *udnv1.ClusterUserDefinedNetwork + + BeforeEach(func() { + testNs := testNamespace(testNsName) + cudn = testClusterUDN("test", testNs.Name) + expectedNAD := testClusterUdnNAD(cudn.Name, testNs.Name) + c = newTestController(renderNadStub(expectedNAD), cudn, testNs, expectedNAD) + + nads, err := c.syncClusterUDN(cudn) + Expect(err).ToNot(HaveOccurred()) + Expect(nads).To(ConsistOf(*expectedNAD)) + + By("mark CR for deletion") + cudn.DeletionTimestamp = &metav1.Time{Time: time.Now()} + cudn, err = cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Update(context.Background(), cudn, metav1.UpdateOptions{})
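+ // Note: the fake clientset does not act on DeletionTimestamp; the update merely records the CR as marked for deletion so the following sync observes it.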
+ Expect(err).ToNot(HaveOccurred()) + Expect(cudn.DeletionTimestamp.IsZero()).To(BeFalse()) + }) + + It("should delete NAD", func() { + nads, err := c.syncClusterUDN(cudn) + Expect(err).ToNot(HaveOccurred()) + Expect(nads).To(BeEmpty()) + Expect(cudn.Finalizers).To(BeEmpty()) + + nadList, err := cs.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(testNsName).List(context.Background(), metav1.ListOptions{}) + Expect(err).ToNot(HaveOccurred()) + Expect(nadList.Items).To(BeEmpty()) + }) + It("should fail to remove NAD finalizer when NAD update fails", func() { + expectedErr := errors.New("test err") + cs.NetworkAttchDefClient.(*netv1fakeclientset.Clientset).PrependReactor("update", "network-attachment-definitions", func(action testing.Action) (bool, runtime.Object, error) { + return true, nil, expectedErr + }) + + _, err := c.syncClusterUDN(cudn) + Expect(err).To(MatchError(expectedErr)) + }) + It("should fail to remove NAD finalizer when NAD deletion fails", func() { + expectedErr := errors.New("test err") + cs.NetworkAttchDefClient.(*netv1fakeclientset.Clientset).PrependReactor("delete", "network-attachment-definitions", func(action testing.Action) (bool, runtime.Object, error) { + return true, nil, expectedErr + }) + + _, err := c.syncClusterUDN(cudn) + Expect(err).To(MatchError(expectedErr)) + }) + }) + }) + + Context("ClusterUserDefinedNetwork status update", func() { + It("should succeed given no CR", func() { + c := newTestController(noopRenderNadStub()) + Expect(c.updateClusterUDNStatus(nil, nil, nil)).To(Succeed()) + }) + It("should fail when CR apply status fails", func() { + cudn := testClusterUDN("test") + c := newTestController(noopRenderNadStub(), cudn) + + expectedErr := errors.New("test patch error") + cs.UserDefinedNetworkClient.(*udnfakeclient.Clientset).PrependReactor("patch", "clusteruserdefinednetworks", func(action testing.Action) (bool, runtime.Object, error) { + return true, nil, expectedErr + }) + + Expect(c.updateClusterUDNStatus(cudn, nil, nil)).ToNot(Succeed()) + }) + It("should reflect active namespaces", func() { + testNsNames := []string{"red", "green"} + + cudn := testClusterUDN("test", testNsNames...)
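+ // A NAD is rendered for each selected namespace below; the status update should then report the network as ready in all of them.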
+ c := newTestController(noopRenderNadStub(), cudn) + + var testNADs []netv1.NetworkAttachmentDefinition + for _, nsName := range testNsNames { + testNADs = append(testNADs, *testClusterUdnNAD(cudn.Name, nsName)) + } + + Expect(c.updateClusterUDNStatus(cudn, testNADs, nil)).To(Succeed()) + + cudn, err := cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Get(context.Background(), cudn.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + Expect(normalizeConditions(cudn.Status.Conditions)).To(ConsistOf([]metav1.Condition{ + { + Type: "NetworkReady", + Status: "True", + Reason: "NetworkAttachmentDefinitionReady", + Message: "NetworkAttachmentDefinition has been created in following namespaces: [green, red]", + }, + })) + }) + It("should reflect deleted NADs", func() { + const nsRed = "red" + const nsGreen = "green" + cudn := testClusterUDN("test", nsRed, nsGreen) + c := newTestController(noopRenderNadStub(), cudn) + + nadRed := *testClusterUdnNAD(cudn.Name, nsRed) + testNADs := []netv1.NetworkAttachmentDefinition{nadRed} + + nadGreen := *testClusterUdnNAD(cudn.Name, nsGreen) + nadGreen.DeletionTimestamp = &metav1.Time{Time: time.Now()} + testNADs = append(testNADs, nadGreen) + + Expect(c.updateClusterUDNStatus(cudn, testNADs, nil)).To(Succeed()) + + cudn, err := cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Get(context.Background(), cudn.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + Expect(normalizeConditions(cudn.Status.Conditions)).To(ConsistOf([]metav1.Condition{ + { + Type: "NetworkReady", + Status: "False", + Reason: "NetworkAttachmentDefinitionDeleted", + Message: "NetworkAttachmentDefinition are being deleted: [green/test]", + }, + })) + }) + It("should reflect NAD sync state", func() { + testNsNames := []string{"red", "green"} + + cudn := testClusterUDN("test", testNsNames...) + c := newTestController(noopRenderNadStub(), cudn) + + var testNADs []netv1.NetworkAttachmentDefinition + for _, nsName := range testNsNames { + testNADs = append(testNADs, *testClusterUdnNAD(cudn.Name, nsName)) + } + + testErr := errors.New("test sync NAD error") + Expect(c.updateClusterUDNStatus(cudn, testNADs, testErr)).To(Succeed()) + + cudn, err := cs.UserDefinedNetworkClient.K8sV1().ClusterUserDefinedNetworks().Get(context.Background(), cudn.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + Expect(normalizeConditions(cudn.Status.Conditions)).To(ConsistOf([]metav1.Condition{ + { + Type: "NetworkReady", + Status: "False", + Reason: "NetworkAttachmentDefinitionSyncError", + Message: "test sync NAD error", + }, + })) + }) + }) }) -func assertUserDefinedNetworkStatus(udnClient *udnfakeclient.Clientset, udn *udnv1.UserDefinedNetwork, expectedStatus *udnv1.UserDefinedNetworkStatus) { +// assertConditionReportNetworkInUse checks that the conditions reflect the network's consumers. +func assertConditionReportNetworkInUse(conditions []metav1.Condition, messageNADPods map[string]string) error { + // To make this check usable inside an Eventually clause (so it can wait for the expected condition state), + // Gomega Expect is not used, as it would make Eventually fail immediately. + // In addition, a Gomega equality matcher cannot be used, since the namespace order in the condition message is not deterministic.
+ + if len(conditions) != 1 { + return fmt.Errorf("expected conditions to have length 1, got: %d", len(conditions)) + } + + c := conditions[0] + if c.Type != "NetworkReady" || + c.Status != metav1.ConditionFalse || + c.Reason != "NetworkAttachmentDefinitionSyncError" { + + return fmt.Errorf("got condition in unexpected state: %+v", c) + } + + for nadKey, podKey := range messageNADPods { + expectedToken := fmt.Sprintf("failed to delete NetworkAttachmentDefinition [%s]: network in use by the following pods: [%s]", nadKey, podKey) + if !strings.Contains(c.Message, expectedToken) { + return fmt.Errorf("condition message doesn't contain expected token %q, got: %q", expectedToken, c.Message) + } + } + + return nil +} + +func assertUserDefinedNetworkStatus(udnClient udnclient.Interface, udn *udnv1.UserDefinedNetwork, expectedStatus *udnv1.UserDefinedNetworkStatus) { actualUDN, err := udnClient.K8sV1().UserDefinedNetworks(udn.Namespace).Get(context.Background(), udn.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -746,16 +1250,16 @@ func assertUserDefinedNetworkStatus(udnClient *udnfakeclient.Clientset, udn *udn } func assertFinalizersPresent( - udnClient *udnfakeclient.Clientset, - nadClient *netv1fakeclientset.Clientset, + udnClient udnclient.Interface, + nadClient netv1clientset.Interface, udn *udnv1.UserDefinedNetwork, - pods ...corev1.Pod, + pods ...*corev1.Pod, ) { var podNames []string for _, pod := range pods { podNames = append(podNames, pod.Namespace+"/"+pod.Name) } - expectedConditionMsg := fmt.Sprintf(`failed to verify NAD not in use [%s/%s]: network in use by the following pods: %v`, + expectedConditionMsg := fmt.Sprintf(`failed to delete NetworkAttachmentDefinition [%s/%s]: network in use by the following pods: %v`, udn.Namespace, udn.Name, podNames) Eventually(func() []metav1.Condition { @@ -842,6 +1346,60 @@ func testNADWithDeletionTimestamp(ts time.Time) *netv1.NetworkAttachmentDefiniti return nad } +func testNamespace(name string) *corev1.Namespace { + return &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + "kubernetes.io/metadata.name": name, + }, + }, + } +} + +func testClusterUDN(name string, targetNamespaces ...string) *udnv1.ClusterUserDefinedNetwork { + return &udnv1.ClusterUserDefinedNetwork{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"k8s.ovn.org/user-defined-network": ""}, + Finalizers: []string{"k8s.ovn.org/user-defined-network-protection"}, + Name: name, + UID: "1", + }, + Spec: udnv1.ClusterUserDefinedNetworkSpec{ + NamespaceSelector: metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: corev1.LabelMetadataName, + Operator: metav1.LabelSelectorOpIn, + Values: targetNamespaces, + }, + }}, + Network: udnv1.NetworkSpec{}, + }, + } +} + +func testClusterUdnNAD(name, namespace string) *netv1.NetworkAttachmentDefinition { + return &netv1.NetworkAttachmentDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{"k8s.ovn.org/user-defined-network": ""}, + Finalizers: []string{"k8s.ovn.org/user-defined-network-protection"}, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: udnv1.SchemeGroupVersion.String(), + Kind: "ClusterUserDefinedNetwork", + Name: name, + UID: "1", + BlockOwnerDeletion: pointer.Bool(true), + Controller: pointer.Bool(true), + }, + }, + }, + Spec: netv1.NetworkAttachmentDefinitionSpec{}, + } +} + func noopRenderNadStub() RenderNetAttachDefManifest { return newRenderNadStub(nil, nil) } @@ 
-855,7 +1413,7 @@ func failRenderNadStub(err error) RenderNetAttachDefManifest { } func newRenderNadStub(nad *netv1.NetworkAttachmentDefinition, err error) RenderNetAttachDefManifest { - return func(udn *udnv1.UserDefinedNetwork) (*netv1.NetworkAttachmentDefinition, error) { + return func(obj client.Object, targetNamespace string) (*netv1.NetworkAttachmentDefinition, error) { return nad, err } } diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/nad.go b/go-controller/pkg/clustermanager/userdefinednetwork/nad.go new file mode 100644 index 0000000000..6cbbca5171 --- /dev/null +++ b/go-controller/pkg/clustermanager/userdefinednetwork/nad.go @@ -0,0 +1,52 @@ +package userdefinednetwork + +import ( + "encoding/json" + "fmt" + + v1 "k8s.io/api/core/v1" + + netv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/clustermanager/userdefinednetwork/template" + cnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" + ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" +) + +// NetAttachDefNotInUse checks that no pod is attached to the given NAD. +// A pod is considered not attached to the network in case its OVN pod annotation doesn't specify +// the given NAD key. +func NetAttachDefNotInUse(nad *netv1.NetworkAttachmentDefinition, pods []*v1.Pod) error { + nadName := util.GetNADName(nad.Namespace, nad.Name) + var connectedPods []string + for _, pod := range pods { + podNetworks, err := util.UnmarshalPodAnnotationAllNetworks(pod.Annotations) + if err != nil && !util.IsAnnotationNotSetError(err) { + return fmt.Errorf("failed to verify NAD not in use [%[1]s/%[2]s]: failed to unmarshal pod annotation [%[1]s/%[3]s]: %[4]w", + nad.Namespace, nad.Name, pod.Name, err) + } + if _, ok := podNetworks[nadName]; ok { + connectedPods = append(connectedPods, pod.Namespace+"/"+pod.Name) + } + } + if len(connectedPods) > 0 { + return fmt.Errorf("network in use by the following pods: %v", connectedPods) + } + return nil +} + +// PrimaryNetAttachDefNotExist checks that no OVN-K primary network NAD exists in the given slice. +func PrimaryNetAttachDefNotExist(nads []*netv1.NetworkAttachmentDefinition) error { + for _, nad := range nads { + var netConf *cnitypes.NetConf + if err := json.Unmarshal([]byte(nad.Spec.Config), &netConf); err != nil { + return fmt.Errorf("failed to validate no primary network exists: unmarshal failed [%s/%s]: %w", + nad.Namespace, nad.Name, err) + } + if netConf.Type == template.OvnK8sCNIOverlay && netConf.Role == ovntypes.NetworkRolePrimary { + return fmt.Errorf("primary network already exists in namespace %q: %q", nad.Namespace, nad.Name) + } + } + return nil +} diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/nad_test.go b/go-controller/pkg/clustermanager/userdefinednetwork/nad_test.go new file mode 100644 index 0000000000..0a8b68121b --- /dev/null +++ b/go-controller/pkg/clustermanager/userdefinednetwork/nad_test.go @@ -0,0 +1,124 @@ +package userdefinednetwork + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + netv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" +) + +var _ = Describe("NetAttachDefNotInUse", func() { + DescribeTable("should succeed", + func(nad *netv1.NetworkAttachmentDefinition, pods []*corev1.Pod) { + Expect(NetAttachDefNotInUse(nad, pods)).To(Succeed()) + }, + Entry("pods has no OVN annotation", + &netv1.NetworkAttachmentDefinition{ + ObjectMeta: metav1.ObjectMeta{Name: "test-net", Namespace: "blue"}, + }, + []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "blue"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "blue"}, + }, + }, + ), + Entry("no pod is connected", + &netv1.NetworkAttachmentDefinition{ + ObjectMeta: metav1.ObjectMeta{Name: "test-net", Namespace: "blue"}, + }, + []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "blue", + Annotations: map[string]string{ + "k8s.ovn.org/pod-networks": `{"default":{"role":"primary", "mac_address":"0a:58:0a:f4:02:03"}}`, + }}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "blue"}, + }, + }, + ), + ) + + DescribeTable("should fail", + func(nad *netv1.NetworkAttachmentDefinition, pods []*corev1.Pod) { + Expect(NetAttachDefNotInUse(nad, pods)).ToNot(Succeed()) + }, + Entry("1 pod is connected", + &netv1.NetworkAttachmentDefinition{ + ObjectMeta: metav1.ObjectMeta{Name: "test-net", Namespace: "blue"}, + }, + []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "blue", + Annotations: map[string]string{ + "k8s.ovn.org/pod-networks": `{"default":{"role":"infrastructure-locked", "mac_address":"0a:58:0a:f4:02:03"},` + + `"blue/test-net":{"role": "primary","mac_address":"0a:58:0a:f4:02:01"}}`, + }}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "bar"}, + }, + }, + ), + Entry("1 pod has invalid annotation", + &netv1.NetworkAttachmentDefinition{ + ObjectMeta: metav1.ObjectMeta{Name: "test-net", Namespace: "blue"}, + }, + []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "blue", + Annotations: map[string]string{ + "k8s.ovn.org/pod-networks": `INVALID`, + }}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "bar"}, + }, + }, + ), + ) +}) + +var _ = Describe("PrimaryNetAttachDefNotExist", func() { + It("should succeed given no primary UDN NAD", func() { + nads := []*netv1.NetworkAttachmentDefinition{ + { + ObjectMeta: metav1.ObjectMeta{Name: "test-net1", Namespace: "blue"}, + Spec: netv1.NetworkAttachmentDefinitionSpec{Config: `{"cniVersion": "1.0.0","type": "ovn-k8s-cni-overlay","role": "secondary"}`}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "test-net2", Namespace: "blue"}, + Spec: netv1.NetworkAttachmentDefinitionSpec{Config: `{"cniVersion": "1.0.0","type": "ovn-k8s-cni-overlay","role": "secondary"}`}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "test-net3", Namespace: "blue"}, + Spec: netv1.NetworkAttachmentDefinitionSpec{Config: `{"cniVersion": "1.0.0","type": "fake-ovn-cni","role": "primary"}`}, + }, + } + Expect(PrimaryNetAttachDefNotExist(nads)).To(Succeed()) + }) + It("should fail given primary UDN NAD", func() { + nads := []*netv1.NetworkAttachmentDefinition{ + { + ObjectMeta: metav1.ObjectMeta{Name: "test-net1", Namespace: "blue"}, + Spec: netv1.NetworkAttachmentDefinitionSpec{Config: `{"cniVersion": "1.0.0","type": "ovn-k8s-cni-overlay","role": "primary"}`}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "test-net2", Namespace: "blue"}, 
+ Spec: netv1.NetworkAttachmentDefinitionSpec{Config: `{"cniVersion": "1.0.0","type": "ovn-k8s-cni-overlay","role": "secondary"}`}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "test-net3", Namespace: "blue"}, + Spec: netv1.NetworkAttachmentDefinitionSpec{Config: `{"cniVersion": "1.0.0","type": "fake-ovn-cni","role": "primary"}`}, + }, + } + Expect(PrimaryNetAttachDefNotExist(nads)).ToNot(Succeed()) + }) +}) diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/notifier/namespace.go b/go-controller/pkg/clustermanager/userdefinednetwork/notifier/namespace.go new file mode 100644 index 0000000000..90ff81befc --- /dev/null +++ b/go-controller/pkg/clustermanager/userdefinednetwork/notifier/namespace.go @@ -0,0 +1,69 @@ +package notifier + +import ( + "errors" + "reflect" + + corev1 "k8s.io/api/core/v1" + corev1informer "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/util/workqueue" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/controller" +) + +type NamespaceReconciler interface { + ReconcileNamespace(key string) error +} + +// NamespaceNotifier watches Namespace objects and notifies subscribers upon change. +// It enqueues the reconciled object keys in the subscribing controllers' workqueues. +type NamespaceNotifier struct { + Controller controller.Controller + + subscribers []NamespaceReconciler +} + +func NewNamespaceNotifier(nsInformer corev1informer.NamespaceInformer, subscribers ...NamespaceReconciler) *NamespaceNotifier { + c := &NamespaceNotifier{ + subscribers: subscribers, + } + + nsLister := nsInformer.Lister() + cfg := &controller.ControllerConfig[corev1.Namespace]{ + RateLimiter: workqueue.DefaultTypedControllerRateLimiter[string](), + Reconcile: c.reconcile, + ObjNeedsUpdate: c.needUpdate, + Threadiness: 1, + Informer: nsInformer.Informer(), + Lister: nsLister.List, + } + c.Controller = controller.NewController[corev1.Namespace]("udn-namespace-controller", cfg) + + return c +} + +// needUpdate returns true when the namespace has been created or deleted, or when its labels have changed. +func (c *NamespaceNotifier) needUpdate(old, new *corev1.Namespace) bool { + nsCreated := old == nil && new != nil + nsDeleted := old != nil && new == nil + nsLabelsChanged := old != nil && new != nil && + !reflect.DeepEqual(old.Labels, new.Labels) + + return nsCreated || nsDeleted || nsLabelsChanged +} + +// reconcile notifies subscribers with the reconciled namespace key following namespace events. +func (c *NamespaceNotifier) reconcile(key string) error { + var errs []error + for _, subscriber := range c.subscribers { + if subscriber != nil { + // enqueue the reconciled namespace key in the subscriber's workqueue to + // enable the subscriber to act on namespace changes + if err := subscriber.ReconcileNamespace(key); err != nil { + errs = append(errs, err) + } + } + } + + return errors.Join(errs...) +} diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/notifier/namespace_test.go b/go-controller/pkg/clustermanager/userdefinednetwork/notifier/namespace_test.go new file mode 100644 index 0000000000..5ad4ff66f5 --- /dev/null +++ b/go-controller/pkg/clustermanager/userdefinednetwork/notifier/namespace_test.go @@ -0,0 +1,164 @@ +package notifier + +import ( + "context" + "maps" + "strconv" + "sync" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + + netv1fake "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/fake" + + udnv1fake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/fake" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/controller" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" +) + +var _ = Describe("NamespaceNotifier", func() { + var ( + kubeClient *fake.Clientset + wf *factory.WatchFactory + testNsNotifier *NamespaceNotifier + ) + + BeforeEach(func() { + kubeClient = fake.NewSimpleClientset() + + // enable features to make watch-factory start the namespace informer + Expect(config.PrepareTestConfig()).To(Succeed()) + config.OVNKubernetesFeature.EnableMultiNetwork = true + config.OVNKubernetesFeature.EnableNetworkSegmentation = true + fakeClient := &util.OVNClusterManagerClientset{ + KubeClient: kubeClient, + NetworkAttchDefClient: netv1fake.NewSimpleClientset(), + UserDefinedNetworkClient: udnv1fake.NewSimpleClientset(), + } + var err error + wf, err = factory.NewClusterManagerWatchFactory(fakeClient) + Expect(err).NotTo(HaveOccurred()) + Expect(wf.Start()).To(Succeed()) + }) + + AfterEach(func() { + wf.Shutdown() + }) + + var s *testSubscriber + + BeforeEach(func() { + s = &testSubscriber{reconciledKeys: map[string]int64{}} + testNsNotifier = NewNamespaceNotifier(wf.NamespaceInformer(), s) + Expect(controller.Start(testNsNotifier.Controller)).Should(Succeed()) + + // create tests namespaces + for i := 0; i < 3; i++ { + nsName := "test-" + strconv.Itoa(i) + _, err := kubeClient.CoreV1().Namespaces().Create(context.Background(), testNamespace(nsName), metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + } + }) + + AfterEach(func() { + if testNsNotifier != nil { + controller.Stop(testNsNotifier.Controller) + } + }) + + It("should notify namespace create events", func() { + Eventually(func() map[string]int64 { + return s.GetReconciledKeys() + }).Should(Equal(map[string]int64{ + "test-0": 1, + "test-1": 1, + "test-2": 1, + })) + }) + + It("should notify namespace delete events", func() { + Eventually(func() map[string]int64 { + return s.GetReconciledKeys() + }).Should(Equal(map[string]int64{ + "test-0": 1, + "test-1": 1, + "test-2": 1, + })) + + Expect(kubeClient.CoreV1().Namespaces().Delete(context.Background(), "test-2", metav1.DeleteOptions{})).To(Succeed()) + Expect(kubeClient.CoreV1().Namespaces().Delete(context.Background(), "test-0", metav1.DeleteOptions{})).To(Succeed()) + + Eventually(func() map[string]int64 { + return s.GetReconciledKeys() + }).Should(Equal(map[string]int64{ + "test-0": 2, + "test-1": 1, + "test-2": 2, + }), "should record additional two events, following namespaces deletion") + }) + + It("should notify namespace labels change events", func() { + Eventually(func() map[string]int64 { + return s.GetReconciledKeys() + }).Should(Equal(map[string]int64{ + "test-0": 1, + "test-1": 1, + "test-2": 1, + })) + + ns, err := kubeClient.CoreV1().Namespaces().Get(context.Background(), "test-1", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + ns.Labels["test.io"] = "example" + ns, err = kubeClient.CoreV1().Namespaces().Update(context.Background(), ns, metav1.UpdateOptions{}) + 
Expect(err).NotTo(HaveOccurred()) + + Eventually(func() map[string]int64 { + return s.GetReconciledKeys() + }).Should(Equal(map[string]int64{ + "test-0": 1, + "test-1": 2, + "test-2": 1, + }), "should record additional event following namespace update") + }) +}) + +func testNamespace(name string) *corev1.Namespace { + return &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + corev1.LabelMetadataName: name, + }, + }, + } +} + +type testSubscriber struct { + err error + reconciledKeys map[string]int64 + lock sync.RWMutex +} + +func (s *testSubscriber) ReconcileNamespace(key string) error { + s.lock.Lock() + defer s.lock.Unlock() + + s.reconciledKeys[key]++ + return s.err +} + +func (s *testSubscriber) GetReconciledKeys() map[string]int64 { + s.lock.RLock() + defer s.lock.RUnlock() + + cp := map[string]int64{} + maps.Copy(cp, s.reconciledKeys) + return cp +} diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/notifier/net-attach-def.go b/go-controller/pkg/clustermanager/userdefinednetwork/notifier/net-attach-def.go index 0c7b14b821..f0ead0d2c1 100644 --- a/go-controller/pkg/clustermanager/userdefinednetwork/notifier/net-attach-def.go +++ b/go-controller/pkg/clustermanager/userdefinednetwork/notifier/net-attach-def.go @@ -10,7 +10,7 @@ import ( ) type NetAttachDefReconciler interface { - ReconcileNetAttachDef(key string) + ReconcileNetAttachDef(key string) error } // NetAttachDefNotifier watches NetworkAttachmentDefinition objects and notify subscribers upon change. @@ -28,7 +28,7 @@ func NewNetAttachDefNotifier(nadInfomer netv1infomer.NetworkAttachmentDefinition nadLister := nadInfomer.Lister() cfg := &controller.ControllerConfig[netv1.NetworkAttachmentDefinition]{ - RateLimiter: workqueue.DefaultControllerRateLimiter(), + RateLimiter: workqueue.DefaultTypedControllerRateLimiter[string](), Reconcile: c.reconcile, ObjNeedsUpdate: c.needUpdate, Threadiness: 1, @@ -49,7 +49,9 @@ func (c *NetAttachDefNotifier) reconcile(key string) error { if subscriber != nil { // enqueue the reconciled NAD key in the subscribers workqueue to // enable the subscriber act on NAD changes (e.g.: reflect NAD state is status) - subscriber.ReconcileNetAttachDef(key) + if err := subscriber.ReconcileNetAttachDef(key); err != nil { + return err + } } } diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/notifier/notifier_suite_test.go b/go-controller/pkg/clustermanager/userdefinednetwork/notifier/notifier_suite_test.go new file mode 100644 index 0000000000..1ebe3fb9a1 --- /dev/null +++ b/go-controller/pkg/clustermanager/userdefinednetwork/notifier/notifier_suite_test.go @@ -0,0 +1,13 @@ +package notifier + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestNotifier(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Notifier Suite") +} diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template.go b/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template.go index ed99094cde..6dbc473338 100644 --- a/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template.go +++ b/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template.go @@ -3,76 +3,116 @@ package template import ( "encoding/json" "fmt" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "strings" - netv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + netv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" cnitypes "github.com/containernetworking/cni/pkg/types" userdefinednetworkv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) const ( - OvnK8sCNIOverlay = "ovn-k8s-cni-overlay" + OvnK8sCNIOverlay = "ovn-k8s-cni-overlay" + FinalizerUserDefinedNetwork = "k8s.ovn.org/user-defined-network-protection" + LabelUserDefinedNetwork = "k8s.ovn.org/user-defined-network" - cniVersion = "1.0.0" - labelUserDefinedNetwork = "k8s.ovn.org/user-defined-network" + cniVersion = "1.0.0" ) -var udnKind = userdefinednetworkv1.SchemeGroupVersion.WithKind("UserDefinedNetwork") +type SpecGetter interface { + GetTopology() userdefinednetworkv1.NetworkTopology + GetLayer3() *userdefinednetworkv1.Layer3Config + GetLayer2() *userdefinednetworkv1.Layer2Config +} -// RenderNetAttachDefManifest return NetworkAttachmentDefinition according to the given UserDefinedNetwork spec -func RenderNetAttachDefManifest(udn *userdefinednetworkv1.UserDefinedNetwork) (*netv1.NetworkAttachmentDefinition, error) { - if udn == nil { +func ParseNetworkName(networkName string) (udnNamespace, udnName string) { + parts := strings.Split(networkName, ".") + if len(parts) == 2 { + return parts[0], parts[1] + } + return "", "" +} + +func RenderNetAttachDefManifest(obj client.Object, targetNamespace string) (*netv1.NetworkAttachmentDefinition, error) { + if obj == nil { return nil, nil } - if err := validateTopology(udn); err != nil { - return nil, fmt.Errorf("invalid topology spesifeid: %w", err) + if targetNamespace == "" { + return nil, fmt.Errorf("namspace should not be empty") } - cniNetConf, err := renderCNINetworkConfig(udn) - if err != nil { - return nil, fmt.Errorf("failed to render CNI network config: %w", err) + var ownerRef metav1.OwnerReference + var spec SpecGetter + var networkName string + switch o := obj.(type) { + case *userdefinednetworkv1.UserDefinedNetwork: + ownerRef = *metav1.NewControllerRef(obj, userdefinednetworkv1.SchemeGroupVersion.WithKind("UserDefinedNetwork")) + spec = &o.Spec + networkName = targetNamespace + "." + obj.GetName() + case *userdefinednetworkv1.ClusterUserDefinedNetwork: + ownerRef = *metav1.NewControllerRef(obj, userdefinednetworkv1.SchemeGroupVersion.WithKind("ClusterUserDefinedNetwork")) + spec = &o.Spec.Network + networkName = "cluster.udn." 
+ obj.GetName() + default: + return nil, fmt.Errorf("unknown type %T", obj) } - cniNetConfRaw, err := json.Marshal(cniNetConf) + + nadName := util.GetNADName(targetNamespace, obj.GetName()) + + nadSpec, err := RenderNADSpec(networkName, nadName, spec) if err != nil { return nil, err } return &netv1.NetworkAttachmentDefinition{ ObjectMeta: metav1.ObjectMeta{ - Name: udn.Name, - OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(udn, udnKind)}, - Labels: map[string]string{ - labelUserDefinedNetwork: "", - }, - Finalizers: []string{FinalizerUserDefinedNetwork}, - }, - Spec: netv1.NetworkAttachmentDefinitionSpec{ - Config: string(cniNetConfRaw), + Name: obj.GetName(), + OwnerReferences: []metav1.OwnerReference{ownerRef}, + Labels: map[string]string{LabelUserDefinedNetwork: ""}, + Finalizers: []string{FinalizerUserDefinedNetwork}, }, + Spec: *nadSpec, }, nil } -func validateTopology(udn *userdefinednetworkv1.UserDefinedNetwork) error { - if udn.Spec.Topology == userdefinednetworkv1.NetworkTopologyLayer3 && udn.Spec.Layer3 == nil || - udn.Spec.Topology == userdefinednetworkv1.NetworkTopologyLayer2 && udn.Spec.Layer2 == nil { - return fmt.Errorf("topology %[1]s is specified but %[1]s config is nil", udn.Spec.Topology) +func RenderNADSpec(networkName, nadName string, spec SpecGetter) (*netv1.NetworkAttachmentDefinitionSpec, error) { + if err := validateTopology(spec); err != nil { + return nil, fmt.Errorf("invalid topology specified: %w", err) } - return nil + + cniNetConf, err := renderCNINetworkConfig(networkName, nadName, spec) + if err != nil { + return nil, fmt.Errorf("failed to render CNI network config: %w", err) + } + cniNetConfRaw, err := json.Marshal(cniNetConf) + if err != nil { + return nil, err + } + + return &netv1.NetworkAttachmentDefinitionSpec{ + Config: string(cniNetConfRaw), + }, nil } -func renderCNINetworkConfig(udn *userdefinednetworkv1.UserDefinedNetwork) (map[string]interface{}, error) { - networkName := udn.Namespace + "." 
+ udn.Name - nadName := util.GetNADName(udn.Namespace, udn.Name) +func validateTopology(spec SpecGetter) error { + if spec.GetTopology() == userdefinednetworkv1.NetworkTopologyLayer3 && spec.GetLayer3() == nil || + spec.GetTopology() == userdefinednetworkv1.NetworkTopologyLayer2 && spec.GetLayer2() == nil { + return fmt.Errorf("topology %[1]s is specified but %[1]s config is nil", spec.GetTopology()) + } + return nil +} +func renderCNINetworkConfig(networkName, nadName string, spec SpecGetter) (map[string]interface{}, error) { netConfSpec := &ovncnitypes.NetConf{ NetConf: cnitypes.NetConf{ CNIVersion: cniVersion, @@ -80,18 +120,18 @@ func renderCNINetworkConfig(udn *userdefinednetworkv1.UserDefinedNetwork) (map[s Name: networkName, }, NADName: nadName, - Topology: strings.ToLower(string(udn.Spec.Topology)), + Topology: strings.ToLower(string(spec.GetTopology())), } - switch udn.Spec.Topology { + switch spec.GetTopology() { case userdefinednetworkv1.NetworkTopologyLayer3: - cfg := udn.Spec.Layer3 + cfg := spec.GetLayer3() netConfSpec.Role = strings.ToLower(string(cfg.Role)) netConfSpec.MTU = int(cfg.MTU) netConfSpec.Subnets = layer3SubnetsString(cfg.Subnets) netConfSpec.JoinSubnet = cidrString(renderJoinSubnets(cfg.Role, cfg.JoinSubnets)) case userdefinednetworkv1.NetworkTopologyLayer2: - cfg := udn.Spec.Layer2 + cfg := spec.GetLayer2() netConfSpec.Role = strings.ToLower(string(cfg.Role)) netConfSpec.MTU = int(cfg.MTU) netConfSpec.AllowPersistentIPs = cfg.IPAMLifecycle == userdefinednetworkv1.IPAMLifecyclePersistent @@ -175,3 +215,14 @@ func cidrString[T cidr](subnets T) string { } return strings.Join(cidrs, ",") } + +func GetSpec(obj client.Object) SpecGetter { + switch o := obj.(type) { + case *userdefinednetworkv1.UserDefinedNetwork: + return &o.Spec + case *userdefinednetworkv1.ClusterUserDefinedNetwork: + return &o.Spec.Network + default: + panic(fmt.Sprintf("unknown type %T", obj)) + } +} diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_suite_test.go b/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_suite_test.go index 66de3f89b5..ac65fbbaa9 100644 --- a/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_suite_test.go +++ b/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_suite_test.go @@ -3,11 +3,11 @@ package template import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) -func TestNetworkAttachmetDefinitionTemplate(t *testing.T) { +func TestNetworkAttachmentDefinitionTemplate(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "NetworkAttachmentDefintion Template Suite") } diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_test.go b/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_test.go index bad077d8df..29679a6cee 100644 --- a/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_test.go +++ b/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_test.go @@ -1,38 +1,26 @@ package template import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" netv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" udnv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" ) var _ = Describe("NetAttachDefTemplate", func() { - const udnTypeName = "UserDefinedNetwork" - - var udnApiVersion = udnv1.SchemeGroupVersion.String() - - DescribeTable("should fail given", + DescribeTable("should fail to render NAD spec given", func(spec *udnv1.UserDefinedNetworkSpec) { - udn := &udnv1.UserDefinedNetwork{ - ObjectMeta: metav1.ObjectMeta{Namespace: "mynamespace", Name: "test-net", UID: "1"}, - Spec: *spec, - } - _, err := RenderNetAttachDefManifest(udn) + _, err := RenderNADSpec("foo", "bar", spec) Expect(err).To(HaveOccurred()) }, - Entry("invalid topology: topology layer2 & layer3 config", - &udnv1.UserDefinedNetworkSpec{Topology: udnv1.NetworkTopologyLayer2, Layer3: &udnv1.Layer3Config{}}, - ), - Entry("invalid topology: topology layer3 & layer2 config", - &udnv1.UserDefinedNetworkSpec{Topology: udnv1.NetworkTopologyLayer3, Layer2: &udnv1.Layer2Config{}}, - ), Entry("invalid layer2 subnets", &udnv1.UserDefinedNetworkSpec{ Topology: udnv1.NetworkTopologyLayer2, @@ -182,20 +170,57 @@ var _ = Describe("NetAttachDefTemplate", func() { ), ) - It("should return nil given no NAD", func() { - _, err := RenderNetAttachDefManifest(nil) + DescribeTable("should fail to render NAD, given", + func(obj client.Object) { + _, err := RenderNetAttachDefManifest(obj, "") + Expect(err).To(HaveOccurred()) + }, + Entry("UDN, invalid topology: topology layer2 & layer3 config", + &udnv1.UserDefinedNetwork{Spec: udnv1.UserDefinedNetworkSpec{ + Topology: udnv1.NetworkTopologyLayer2, Layer3: &udnv1.Layer3Config{}}}, + ), + Entry("UDN, invalid topology: topology layer3 & layer2 config", + &udnv1.UserDefinedNetwork{Spec: udnv1.UserDefinedNetworkSpec{ + Topology: udnv1.NetworkTopologyLayer3, Layer2: &udnv1.Layer2Config{}}}, + ), + Entry("CUDN, invalid topology: topology layer2 & layer3 config", + &udnv1.ClusterUserDefinedNetwork{Spec: udnv1.ClusterUserDefinedNetworkSpec{Network: udnv1.NetworkSpec{ + Topology: udnv1.NetworkTopologyLayer2, Layer3: &udnv1.Layer3Config{}}}}, + ), + Entry("CUDN, invalid topology: topology layer3 & layer2 config", + &udnv1.ClusterUserDefinedNetwork{Spec: udnv1.ClusterUserDefinedNetworkSpec{Network: udnv1.NetworkSpec{ + Topology: udnv1.NetworkTopologyLayer3, Layer2: &udnv1.Layer2Config{}}}}, + ), + ) + + It("should return no error given no UDN", func() { + _, err := RenderNetAttachDefManifest(nil, "") Expect(err).NotTo(HaveOccurred()) }) - DescribeTable("should create net attach from spec", + It("should fail given no target namespace", func() { + cudn := &udnv1.UserDefinedNetwork{Spec: udnv1.UserDefinedNetworkSpec{ + Topology: udnv1.NetworkTopologyLayer2, Layer2: &udnv1.Layer2Config{}}, + } + _, err := RenderNetAttachDefManifest(cudn, "") + Expect(err).To(HaveOccurred()) + }) + + It("should fail given unknown type", func() { + _, err := RenderNetAttachDefManifest(&netv1.NetworkAttachmentDefinition{}, "foo") + Expect(err).To(HaveOccurred()) + }) + + DescribeTable("should create UDN NAD from spec", func(testSpec udnv1.UserDefinedNetworkSpec, expectedNadNetConf string) { testUdn := &udnv1.UserDefinedNetwork{ ObjectMeta: metav1.ObjectMeta{Namespace: "mynamespace", Name: "test-net", UID: 
"1"}, Spec: testSpec, } + testNs := "mynamespace" ownerRef := metav1.OwnerReference{ - APIVersion: udnApiVersion, - Kind: udnTypeName, + APIVersion: "k8s.ovn.org/v1", + Kind: "UserDefinedNetwork", Name: "test-net", UID: "1", BlockOwnerDeletion: pointer.Bool(true), @@ -211,7 +236,10 @@ var _ = Describe("NetAttachDefTemplate", func() { Spec: netv1.NetworkAttachmentDefinitionSpec{Config: expectedNadNetConf}, } - nad, err := RenderNetAttachDefManifest(testUdn) + // must be defined so the primary user defined network can match the ip families of the underlying cluster + config.IPv4Mode = true + config.IPv6Mode = true + nad, err := RenderNetAttachDefManifest(testUdn, testNs) Expect(err).NotTo(HaveOccurred()) Expect(nad.TypeMeta).To(Equal(expectedNAD.TypeMeta)) Expect(nad.ObjectMeta).To(Equal(expectedNAD.ObjectMeta)) @@ -311,4 +339,131 @@ var _ = Describe("NetAttachDefTemplate", func() { }`, ), ) + + DescribeTable("should create CUDN NAD from spec", + func(testSpec udnv1.NetworkSpec, expectedNadNetConf string) { + cudn := &udnv1.ClusterUserDefinedNetwork{ + ObjectMeta: metav1.ObjectMeta{Name: "test-net", UID: "1"}, + Spec: udnv1.ClusterUserDefinedNetworkSpec{Network: testSpec}, + } + testNs := "mynamespace" + + expectedOwnerRef := metav1.OwnerReference{ + APIVersion: "k8s.ovn.org/v1", + Kind: "ClusterUserDefinedNetwork", + Name: "test-net", + UID: "1", + BlockOwnerDeletion: pointer.Bool(true), + Controller: pointer.Bool(true), + } + expectedNAD := &netv1.NetworkAttachmentDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-net", + OwnerReferences: []metav1.OwnerReference{expectedOwnerRef}, + Labels: map[string]string{"k8s.ovn.org/user-defined-network": ""}, + Finalizers: []string{"k8s.ovn.org/user-defined-network-protection"}, + }, + Spec: netv1.NetworkAttachmentDefinitionSpec{Config: expectedNadNetConf}, + } + + nad, err := RenderNetAttachDefManifest(cudn, testNs) + Expect(err).NotTo(HaveOccurred()) + Expect(nad.TypeMeta).To(Equal(expectedNAD.TypeMeta)) + Expect(nad.ObjectMeta).To(Equal(expectedNAD.ObjectMeta)) + Expect(nad.Spec.Config).To(MatchJSON(expectedNAD.Spec.Config)) + }, + Entry("primary network, layer3", + udnv1.NetworkSpec{ + Topology: udnv1.NetworkTopologyLayer3, + Layer3: &udnv1.Layer3Config{ + Role: udnv1.NetworkRolePrimary, + Subnets: []udnv1.Layer3Subnet{ + {CIDR: "192.168.100.0/16"}, + {CIDR: "2001:dbb::/60"}, + }, + MTU: 1500, + }, + }, + `{ + "cniVersion": "1.0.0", + "type": "ovn-k8s-cni-overlay", + "name": "cluster.udn.test-net", + "netAttachDefName": "mynamespace/test-net", + "role": "primary", + "topology": "layer3", + "joinSubnets": "100.65.0.0/16,fd99::/64", + "subnets": "192.168.100.0/16,2001:dbb::/60", + "mtu": 1500 + }`, + ), + Entry("primary network, layer2", + udnv1.NetworkSpec{ + Topology: udnv1.NetworkTopologyLayer2, + Layer2: &udnv1.Layer2Config{ + Role: udnv1.NetworkRolePrimary, + Subnets: udnv1.DualStackCIDRs{"192.168.100.0/24", "2001:dbb::/64"}, + MTU: 1500, + IPAMLifecycle: udnv1.IPAMLifecyclePersistent, + }, + }, + `{ + "cniVersion": "1.0.0", + "type": "ovn-k8s-cni-overlay", + "name": "cluster.udn.test-net", + "netAttachDefName": "mynamespace/test-net", + "role": "primary", + "topology": "layer2", + "joinSubnets": "100.65.0.0/16,fd99::/64", + "subnets": "192.168.100.0/24,2001:dbb::/64", + "mtu": 1500, + "allowPersistentIPs": true + }`, + ), + Entry("primary network, should override join-subnets when specified", + udnv1.NetworkSpec{ + Topology: udnv1.NetworkTopologyLayer2, + Layer2: &udnv1.Layer2Config{ + Role: udnv1.NetworkRolePrimary, + Subnets: 
udnv1.DualStackCIDRs{"192.168.100.0/24", "2001:dbb::/64"}, + JoinSubnets: udnv1.DualStackCIDRs{"100.62.0.0/24", "fd92::/64"}, + MTU: 1500, + IPAMLifecycle: udnv1.IPAMLifecyclePersistent, + }, + }, + `{ + "cniVersion": "1.0.0", + "type": "ovn-k8s-cni-overlay", + "name": "cluster.udn.test-net", + "netAttachDefName": "mynamespace/test-net", + "role": "primary", + "topology": "layer2", + "joinSubnets": "100.62.0.0/24,fd92::/64", + "subnets": "192.168.100.0/24,2001:dbb::/64", + "mtu": 1500, + "allowPersistentIPs": true + }`, + ), + Entry("secondary network", + udnv1.NetworkSpec{ + Topology: udnv1.NetworkTopologyLayer2, + Layer2: &udnv1.Layer2Config{ + Role: udnv1.NetworkRoleSecondary, + Subnets: udnv1.DualStackCIDRs{"192.168.100.0/24", "2001:dbb::/64"}, + MTU: 1500, + IPAMLifecycle: udnv1.IPAMLifecyclePersistent, + }, + }, + `{ + "cniVersion": "1.0.0", + "type": "ovn-k8s-cni-overlay", + "name": "cluster.udn.test-net", + "netAttachDefName": "mynamespace/test-net", + "role": "secondary", + "topology": "layer2", + "subnets": "192.168.100.0/24,2001:dbb::/64", + "mtu": 1500, + "allowPersistentIPs": true + }`, + ), + ) }) diff --git a/go-controller/pkg/clustermanager/zone_cluster_controller.go b/go-controller/pkg/clustermanager/zone_cluster_controller.go index ae3301469f..99b20927f1 100644 --- a/go-controller/pkg/clustermanager/zone_cluster_controller.go +++ b/go-controller/pkg/clustermanager/zone_cluster_controller.go @@ -48,11 +48,7 @@ type zoneClusterController struct { func newZoneClusterController(ovnClient *util.OVNClusterManagerClientset, wf *factory.WatchFactory) (*zoneClusterController, error) { // Since we don't assign 0 to any node, create IDAllocator with one extra element in maxIds. - nodeIDAllocator, err := id.NewIDAllocator("NodeIDs", maxNodeIDs+1) - if err != nil { - return nil, fmt.Errorf("failed to create an IdAllocator for the nodes, err: %w", err) - } - + nodeIDAllocator := id.NewIDAllocator("NodeIDs", maxNodeIDs+1) // Reserve the id 0. We don't want to assign this id to any of the nodes. 
if err := nodeIDAllocator.ReserveID("zero", 0); err != nil { return nil, fmt.Errorf("idAllocator failed to reserve id 0") @@ -67,7 +63,7 @@ func newZoneClusterController(ovnClient *util.OVNClusterManagerClientset, wf *fa wg := &sync.WaitGroup{} var transitSwitchIPv4Generator, transitSwitchIPv6Generator *ipgenerator.IPGenerator - + var err error if config.OVNKubernetesFeature.EnableInterconnect { if config.IPv4Mode { transitSwitchIPv4Generator, err = ipgenerator.NewIPGenerator(config.ClusterManager.V4TransitSwitchSubnet) diff --git a/go-controller/pkg/cni/cni.go b/go-controller/pkg/cni/cni.go index 1180076bb3..cb615ff501 100644 --- a/go-controller/pkg/cni/cni.go +++ b/go-controller/pkg/cni/cni.go @@ -19,6 +19,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kubevirt" + nad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -105,10 +106,12 @@ func (pr *PodRequest) checkOrUpdatePodUID(pod *kapi.Pod) error { return nil } -func (pr *PodRequest) cmdAdd(kubeAuth *KubeAPIAuth, clientset *ClientSet) (*Response, error) { - return pr.cmdAddWithGetCNIResultFunc(kubeAuth, clientset, getCNIResult) +func (pr *PodRequest) cmdAdd(kubeAuth *KubeAPIAuth, clientset *ClientSet, + nadController *nad.NetAttachDefinitionController) (*Response, error) { + return pr.cmdAddWithGetCNIResultFunc(kubeAuth, clientset, getCNIResult, nadController) } -func (pr *PodRequest) cmdAddWithGetCNIResultFunc(kubeAuth *KubeAPIAuth, clientset *ClientSet, getCNIResultFn getCNIResultFunc) (*Response, error) { +func (pr *PodRequest) cmdAddWithGetCNIResultFunc(kubeAuth *KubeAPIAuth, clientset *ClientSet, + getCNIResultFn getCNIResultFunc, nadController nad.NADController) (*Response, error) { namespace := pr.PodNamespace podName := pr.PodName if namespace == "" || podName == "" { @@ -141,7 +144,7 @@ func (pr *PodRequest) cmdAddWithGetCNIResultFunc(kubeAuth *KubeAPIAuth, clientse // Get the IP address and MAC address of the pod // for DPU, ensure connection-details is present - primaryUDN := udn.NewPrimaryNetwork(clientset.nadLister) + primaryUDN := udn.NewPrimaryNetwork(nadController) if util.IsNetworkSegmentationSupportEnabled() { annotCondFn = primaryUDN.WaitForPrimaryAnnotationFn(namespace, annotCondFn) } @@ -174,9 +177,24 @@ func (pr *PodRequest) cmdAddWithGetCNIResultFunc(kubeAuth *KubeAPIAuth, clientse if err != nil { return nil, err } - if _, err := getCNIResultFn(primaryUDNPodRequest, clientset, primaryUDNPodInfo); err != nil { + primaryUDNResult, err := getCNIResultFn(primaryUDNPodRequest, clientset, primaryUDNPodInfo) + if err != nil { return nil, err } + + response.Result.Routes = append(response.Result.Routes, primaryUDNResult.Routes...) + numOfInitialIPs := len(response.Result.IPs) + numOfInitialIfaces := len(response.Result.Interfaces) + response.Result.Interfaces = append(response.Result.Interfaces, primaryUDNResult.Interfaces...) + response.Result.IPs = append(response.Result.IPs, primaryUDNResult.IPs...) 
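+ // In a CNI Result, each IPConfig.Interface is an index into that Result's
+ // Interfaces slice. The UDN IPs appended above still carry indices relative
+ // to the UDN-only result, so they have to be shifted by the number of
+ // interfaces the default-network result already contributed.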
+ + // Offset the interface index of the appended primary-UDN IPs so they point at the primary UDN's interfaces in the merged interface list + for i := numOfInitialIPs; i < len(response.Result.IPs); i++ { + ifaceIPConfig := response.Result.IPs[i].Copy() + if response.Result.IPs[i].Interface != nil { + response.Result.IPs[i].Interface = current.Int(*ifaceIPConfig.Interface + numOfInitialIfaces) + } + } } } else { response.PodIFInfo = podInterfaceInfo @@ -276,7 +294,7 @@ func (pr *PodRequest) cmdCheck() error { // Argument '*PodRequest' encapsulates all the necessary information // kclient is passed in so that clientset can be reused from the server // Return value is the actual bytes to be sent back without further processing. -func HandlePodRequest(request *PodRequest, clientset *ClientSet, kubeAuth *KubeAPIAuth) ([]byte, error) { +func HandlePodRequest(request *PodRequest, clientset *ClientSet, kubeAuth *KubeAPIAuth, nadController *nad.NetAttachDefinitionController) ([]byte, error) { var result, resultForLogging []byte var response *Response var err, err1 error @@ -284,7 +302,7 @@ func HandlePodRequest(request *PodRequest, clientset *ClientSet, kubeAuth *KubeA klog.Infof("%s %s starting CNI request %+v", request, request.Command, request) switch request.Command { case CNIAdd: - response, err = request.cmdAdd(kubeAuth, clientset) + response, err = request.cmdAdd(kubeAuth, clientset, nadController) case CNIDel: response, err = request.cmdDel(clientset) case CNICheck: diff --git a/go-controller/pkg/cni/cni_dpu_test.go b/go-controller/pkg/cni/cni_dpu_test.go index e62495d31f..620b7faf42 100644 --- a/go-controller/pkg/cni/cni_dpu_test.go +++ b/go-controller/pkg/cni/cni_dpu_test.go @@ -5,7 +5,7 @@ import ( "time" cnitypes "github.com/containernetworking/cni/pkg/types" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" kubeMocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube/mocks" diff --git a/go-controller/pkg/cni/cni_suite_test.go b/go-controller/pkg/cni/cni_suite_test.go index b5b0f4d400..50244b17d4 100644 --- a/go-controller/pkg/cni/cni_suite_test.go +++ b/go-controller/pkg/cni/cni_suite_test.go @@ -3,7 +3,7 @@ package cni import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/cni/cni_test.go b/go-controller/pkg/cni/cni_test.go index 558acd19ed..78c97270d4 100644 --- a/go-controller/pkg/cni/cni_test.go +++ b/go-controller/pkg/cni/cni_test.go @@ -2,23 +2,30 @@ package cni import ( "context" + "fmt" + "net" "time" - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" cnitypes "github.com/containernetworking/cni/pkg/types" current "github.com/containernetworking/cni/pkg/types/100" - v1nadmocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/mocks/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/kubernetes/fake" + nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + v1nadmocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/mocks/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" v1mocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/mocks/k8s.io/client-go/listers/core/v1" + ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/nad" ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) type podRequestInterfaceOpsStub struct { @@ -50,10 +57,13 @@ var _ = Describe("Network Segmentation", func() { } prInterfaceOpsStub = &podRequestInterfaceOpsStub{} enableMultiNetwork, enableNetworkSegmentation bool + nadController *ovntest.FakeNADController ) BeforeEach(func() { + config.IPv4Mode = true + config.IPv6Mode = true enableMultiNetwork = config.OVNKubernetesFeature.EnableMultiNetwork enableNetworkSegmentation = config.OVNKubernetesFeature.EnableNetworkSegmentation @@ -83,7 +93,6 @@ var _ = Describe("Network Segmentation", func() { nadLister = v1nadmocks.NetworkAttachmentDefinitionLister{} clientSet = &ClientSet{ podLister: &podLister, - nadLister: &nadLister, kclient: fakeClientset, } kubeAuth = &KubeAPIAuth{ @@ -117,7 +126,7 @@ var _ = Describe("Network Segmentation", func() { }) It("should not fail at cmdAdd", func() { podNamespaceLister.On("Get", pr.PodName).Return(pod, nil) - Expect(pr.cmdAddWithGetCNIResultFunc(kubeAuth, clientSet, getCNIResultStub)).NotTo(BeNil()) + Expect(pr.cmdAddWithGetCNIResultFunc(kubeAuth, clientSet, getCNIResultStub, nil)).NotTo(BeNil()) Expect(obtainedPodIterfaceInfos).ToNot(BeEmpty()) }) It("should not fail at cmdDel", func() { @@ -131,27 +140,191 @@ var _ = Describe("Network Segmentation", func() { BeforeEach(func() { config.OVNKubernetesFeature.EnableMultiNetwork = true config.OVNKubernetesFeature.EnableNetworkSegmentation = true - pod = &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: pr.PodName, - Namespace: pr.PodNamespace, - Annotations: map[string]string{ - "k8s.ovn.org/pod-networks": `{"default":{"ip_address":"100.10.10.3/24","mac_address":"0a:58:fd:98:00:01", "role":"primary"}}`, - }, - }, - } - }) - It("should not fail at cmdAdd", func() { - podNamespaceLister.On("Get", pr.PodName).Return(pod, nil) - Expect(pr.cmdAddWithGetCNIResultFunc(kubeAuth, clientSet, getCNIResultStub)).NotTo(BeNil()) - Expect(obtainedPodIterfaceInfos).ToNot(BeEmpty()) }) - It("should not fail at cmdDel", func() { - podNamespaceLister.On("Get", pr.PodName).Return(pod, nil) - Expect(pr.cmdDel(clientSet)).NotTo(BeNil()) - Expect(prInterfaceOpsStub.unconfiguredInterfaces).To(HaveLen(2)) + + Context("pod with default primary network", func() { + BeforeEach(func() { + pod = &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: pr.PodName, + Namespace: pr.PodNamespace, + Annotations: 
map[string]string{ + "k8s.ovn.org/pod-networks": `{"default":{"ip_address":"100.10.10.3/24","mac_address":"0a:58:fd:98:00:01", "role":"primary"}}`, + }, + }, + } + nadController = &ovntest.FakeNADController{ + PrimaryNetworks: make(map[string]util.NetInfo), + } + }) + It("should not fail at cmdAdd", func() { + podNamespaceLister.On("Get", pr.PodName).Return(pod, nil) + Expect(pr.cmdAddWithGetCNIResultFunc(kubeAuth, clientSet, getCNIResultStub, nadController)).NotTo(BeNil()) + Expect(obtainedPodIterfaceInfos).ToNot(BeEmpty()) + }) + It("should not fail at cmdDel", func() { + podNamespaceLister.On("Get", pr.PodName).Return(pod, nil) + Expect(pr.cmdDel(clientSet)).NotTo(BeNil()) + Expect(prInterfaceOpsStub.unconfiguredInterfaces).To(HaveLen(2)) + }) + }) + Context("pod with a user defined primary network", func() { + const ( + dummyMACHostSide = "07:06:05:04:03:02" + nadName = "tenantred" + namespace = "foo-ns" + ) + + dummyGetCNIResult := func(request *PodRequest, getter PodInfoGetter, podInterfaceInfo *PodInterfaceInfo) (*current.Result, error) { + var gatewayIP net.IP + if len(podInterfaceInfo.Gateways) > 0 { + gatewayIP = podInterfaceInfo.Gateways[0] + } + var ips []*current.IPConfig + ifaceIdx := 1 // host side of the veth is 0; pod side of the veth is 1 + for _, ip := range podInterfaceInfo.IPs { + ips = append(ips, &current.IPConfig{Address: *ip, Gateway: gatewayIP, Interface: &ifaceIdx}) + } + ifaceName := "eth0" + if request.netName != "default" { + ifaceName = "ovn-udn1" + } + + interfaces := []*current.Interface{ + { + Name: "host_" + ifaceName, + Mac: dummyMACHostSide, + }, + { + Name: ifaceName, + Mac: podInterfaceInfo.MAC.String(), + Sandbox: "bobloblaw", + }, + } + return &current.Result{ + CNIVersion: "0.3.1", + Interfaces: interfaces, + IPs: ips, + }, nil + } + + BeforeEach(func() { + pod = &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: pr.PodName, + Namespace: pr.PodNamespace, + Annotations: map[string]string{ + "k8s.ovn.org/pod-networks": `{"default":{"ip_addresses":["100.10.10.3/24","fd44::33/64"],"mac_address":"0a:58:fd:98:00:01", "role":"infrastructure-locked"}, "meganet":{"ip_addresses":["10.10.10.30/24","fd10::3/64"],"mac_address":"02:03:04:05:06:07", "role":"primary"}}`, + }, + }, + } + nad := &nadv1.NetworkAttachmentDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: nadName, + Namespace: namespace, + }, + Spec: nadv1.NetworkAttachmentDefinitionSpec{ + Config: dummyPrimaryUDNConfig(namespace, nadName), + }, + } + nadNamespaceLister := &v1nadmocks.NetworkAttachmentDefinitionNamespaceLister{} + nadNamespaceLister.On("List", labels.Everything()).Return([]*nadv1.NetworkAttachmentDefinition{nad}, nil) + nadLister.On("NetworkAttachmentDefinitions", "foo-ns").Return(nadNamespaceLister) + nadNetwork, err := util.ParseNADInfo(nad) + Expect(err).NotTo(HaveOccurred()) + nadController = &ovntest.FakeNADController{ + PrimaryNetworks: make(map[string]util.NetInfo), + } + nadController.PrimaryNetworks[nad.Namespace] = nadNetwork + getCNIResultStub = dummyGetCNIResult + }) + + It("should return the information of both the default net and the primary UDN in the result", func() { + podNamespaceLister.On("Get", pr.PodName).Return(pod, nil) + response, err := pr.cmdAddWithGetCNIResultFunc(kubeAuth, clientSet, getCNIResultStub, nadController) + Expect(err).NotTo(HaveOccurred()) + // for every interface added, we return 2 interfaces; the host side of the + // veth, then the pod side of the veth. 
+ + // thus, the UDN interface idx will be 3: + // idx: iface + // 0: host side default network + // 1: pod side default network + // 2: host side primary UDN + // 3: pod side primary UDN + podDefaultClusterNetIfaceIDX := 1 + podUDNIfaceIDX := 3 + Expect(response.Result).To(Equal( + &current.Result{ + CNIVersion: "0.3.1", + Interfaces: []*current.Interface{ + { + Name: "host_eth0", + Mac: dummyMACHostSide, + }, + { + Name: "eth0", + Mac: "0a:58:fd:98:00:01", + Sandbox: "bobloblaw", + }, + { + Name: "host_ovn-udn1", + Mac: dummyMACHostSide, + }, + { + Name: "ovn-udn1", + Mac: "02:03:04:05:06:07", + Sandbox: "bobloblaw", + }, + }, + IPs: []*current.IPConfig{ + { + Address: net.IPNet{ + IP: net.ParseIP("100.10.10.3"), + Mask: net.CIDRMask(24, 32), + }, + Interface: &podDefaultClusterNetIfaceIDX, + }, + { + Address: net.IPNet{ + IP: net.ParseIP("fd44::33"), + Mask: net.CIDRMask(64, 128), + }, + Interface: &podDefaultClusterNetIfaceIDX, + }, + { + Address: net.IPNet{ + IP: net.ParseIP("10.10.10.30"), + Mask: net.CIDRMask(24, 32), + }, + Interface: &podUDNIfaceIDX, + }, + { + Address: net.IPNet{ + IP: net.ParseIP("fd10::3"), + Mask: net.CIDRMask(64, 128), + }, + Interface: &podUDNIfaceIDX, + }, + }, + }, + )) + }) + }) }) }) + +func dummyPrimaryUDNConfig(ns, nadName string) string { + namespacedName := fmt.Sprintf("%s/%s", ns, nadName) + return fmt.Sprintf(` + { + "name": "tenantred", + "type": "ovn-k8s-cni-overlay", + "topology": "layer2", + "subnets": "10.10.0.0/16,fd10::0/64", + "netAttachDefName": %q, + "role": "primary" + } +`, namespacedName) +} diff --git a/go-controller/pkg/cni/cniserver.go b/go-controller/pkg/cni/cniserver.go index 2c05fa94b3..bf30f4d207 100644 --- a/go-controller/pkg/cni/cniserver.go +++ b/go-controller/pkg/cni/cniserver.go @@ -18,6 +18,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + nad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -48,10 +49,12 @@ import ( // started. 
// NewCNIServer creates and returns a new Server object which will listen on a socket in the given path -func NewCNIServer(factory factory.NodeWatchFactory, kclient kubernetes.Interface) (*Server, error) { +func NewCNIServer(factory factory.NodeWatchFactory, kclient kubernetes.Interface, + nadController *nad.NetAttachDefinitionController) (*Server, error) { if config.OvnKubeNode.Mode == types.NodeModeDPU { return nil, fmt.Errorf("unsupported ovnkube-node mode for CNI server: %s", config.OvnKubeNode.Mode) } + router := mux.NewRouter() s := &Server{ @@ -72,7 +75,7 @@ func NewCNIServer(factory factory.NodeWatchFactory, kclient kubernetes.Interface } if util.IsNetworkSegmentationSupportEnabled() { - s.clientSet.nadLister = factory.NADInformer().Lister() + s.nadController = nadController } if len(config.Kubernetes.CAData) > 0 { @@ -218,7 +221,7 @@ func (s *Server) handleCNIRequest(r *http.Request) ([]byte, error) { } defer req.cancel() - result, err := s.handlePodRequestFunc(req, s.clientSet, s.kubeAuth) + result, err := s.handlePodRequestFunc(req, s.clientSet, s.kubeAuth, s.nadController) if err != nil { // Prefix error with request information for easier debugging return nil, fmt.Errorf("%s %v", req, err) diff --git a/go-controller/pkg/cni/cniserver_test.go b/go-controller/pkg/cni/cniserver_test.go index eee7d5b748..71dd2f4ede 100644 --- a/go-controller/pkg/cni/cniserver_test.go +++ b/go-controller/pkg/cni/cniserver_test.go @@ -23,6 +23,7 @@ import ( nadapi "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + nad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -48,7 +49,7 @@ func clientDoCNI(t *testing.T, client *http.Client, req *Request) ([]byte, int) var expectedResult cnitypes.Result -func serverHandleCNI(request *PodRequest, clientset *ClientSet, kubeAuth *KubeAPIAuth) ([]byte, error) { +func serverHandleCNI(request *PodRequest, clientset *ClientSet, kubeAuth *KubeAPIAuth, nadController *nad.NetAttachDefinitionController) ([]byte, error) { if request.Command == CNIAdd { return json.Marshal(&expectedResult) } else if request.Command == CNIDel || request.Command == CNIUpdate || request.Command == CNICheck { @@ -90,7 +91,7 @@ func TestCNIServer(t *testing.T) { t.Fatalf("failed to start watch factory: %v", err) } - s, err := NewCNIServer(wf, fakeClient) + s, err := NewCNIServer(wf, fakeClient, nil) if err != nil { t.Fatalf("error creating CNI server: %v", err) } diff --git a/go-controller/pkg/cni/ovs_test.go b/go-controller/pkg/cni/ovs_test.go index b09e5c117e..2bdc00ad02 100644 --- a/go-controller/pkg/cni/ovs_test.go +++ b/go-controller/pkg/cni/ovs_test.go @@ -5,7 +5,7 @@ import ( ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) diff --git a/go-controller/pkg/cni/types.go b/go-controller/pkg/cni/types.go index b661473064..edff41df08 100644 --- a/go-controller/pkg/cni/types.go +++ b/go-controller/pkg/cni/types.go @@ -12,11 +12,10 @@ import ( "k8s.io/klog/v2" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" + nad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" nadapi "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" - nadlister "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" - kapi "k8s.io/api/core/v1" ) @@ -169,7 +168,7 @@ type PodRequest struct { deviceInfo nadapi.DeviceInfo } -type podRequestFunc func(request *PodRequest, clientset *ClientSet, kubeAuth *KubeAPIAuth) ([]byte, error) +type podRequestFunc func(request *PodRequest, clientset *ClientSet, kubeAuth *KubeAPIAuth, nadController *nad.NetAttachDefinitionController) ([]byte, error) type getCNIResultFunc func(request *PodRequest, getter PodInfoGetter, podInterfaceInfo *PodInterfaceInfo) (*current.Result, error) type PodInfoGetter interface { @@ -180,7 +179,6 @@ type ClientSet struct { PodInfoGetter kclient kubernetes.Interface podLister corev1listers.PodLister - nadLister nadlister.NetworkAttachmentDefinitionLister } func NewClientSet(kclient kubernetes.Interface, podLister corev1listers.PodLister) *ClientSet { @@ -197,4 +195,5 @@ type Server struct { handlePodRequestFunc podRequestFunc clientSet *ClientSet kubeAuth *KubeAPIAuth + nadController *nad.NetAttachDefinitionController } diff --git a/go-controller/pkg/cni/types/types.go b/go-controller/pkg/cni/types/types.go index 442b1a6c76..88b8f6a83d 100644 --- a/go-controller/pkg/cni/types/types.go +++ b/go-controller/pkg/cni/types/types.go @@ -50,6 +50,14 @@ type NetConf struct { // restart. AllowPersistentIPs bool `json:"allowPersistentIPs,omitempty"` + // PhysicalNetworkName indicates the name of the physical network to which + // the OVN overlay will connect. Only applies to `localnet` topologies. + // When omitted, the physical network name of the network will be the value + // of the `name` attribute. + // This attribute allows multiple overlays to share the same physical + // network mapping in the hosts. 
+ PhysicalNetworkName string `json:"physicalNetworkName,omitempty"` + // PciAddrs in case of using sriov or Auxiliry device name in case of SF DeviceID string `json:"deviceID,omitempty"` // LogFile to log all the messages from cni shim binary to diff --git a/go-controller/pkg/cni/udn/primary_network.go b/go-controller/pkg/cni/udn/primary_network.go index 62220f1b85..f6718a7ac3 100644 --- a/go-controller/pkg/cni/udn/primary_network.go +++ b/go-controller/pkg/cni/udn/primary_network.go @@ -5,8 +5,7 @@ import ( "k8s.io/klog/v2" - nadlister "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" - + nad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -15,14 +14,14 @@ import ( type podAnnotWaitCond = func(map[string]string, string) (*util.PodAnnotation, bool) type UserDefinedPrimaryNetwork struct { - nadLister nadlister.NetworkAttachmentDefinitionLister + nadController nad.NADController annotation *util.PodAnnotation activeNetwork util.NetInfo } -func NewPrimaryNetwork(nadLister nadlister.NetworkAttachmentDefinitionLister) *UserDefinedPrimaryNetwork { +func NewPrimaryNetwork(nadController nad.NADController) *UserDefinedPrimaryNetwork { return &UserDefinedPrimaryNetwork{ - nadLister: nadLister, + nadController: nadController, } } @@ -75,7 +74,7 @@ func (p *UserDefinedPrimaryNetwork) WaitForPrimaryAnnotationFn(namespace string, return nil, false } if err := p.ensure(namespace, annotations, nadName, annotation); err != nil { - klog.Errorf("Failed ensuring user defined primary network: %v", err) + klog.Errorf("Failed ensuring user defined primary network for nad '%s': %v", nadName, err) return nil, false } return annotation, isReady @@ -112,10 +111,10 @@ func (p *UserDefinedPrimaryNetwork) ensure(namespace string, annotations map[str } if err := p.ensureAnnotation(annotations); err != nil { - return fmt.Errorf("failed looking for primary network annotation: %w", err) + return fmt.Errorf("failed looking for primary network annotation for nad '%s': %w", nadName, err) } if err := p.ensureActiveNetwork(namespace); err != nil { - return fmt.Errorf("failed looking for primary network name: %w", err) + return fmt.Errorf("failed looking for primary network name for nad '%s': %w", nadName, err) } return nil } @@ -124,12 +123,12 @@ func (p *UserDefinedPrimaryNetwork) ensureActiveNetwork(namespace string) error if p.activeNetwork != nil { return nil } - activeNetwork, err := util.GetActiveNetworkForNamespace(namespace, p.nadLister) + activeNetwork, err := p.nadController.GetActiveNetworkForNamespace(namespace) if err != nil { return err } if activeNetwork.IsDefault() { - return fmt.Errorf("missing primary user defined network NAD") + return fmt.Errorf("missing primary user defined network NAD for namespace '%s'", namespace) } p.activeNetwork = activeNetwork return nil diff --git a/go-controller/pkg/cni/udn/primary_network_test.go b/go-controller/pkg/cni/udn/primary_network_test.go index 71bd146a0f..08cb692fa4 100644 --- a/go-controller/pkg/cni/udn/primary_network_test.go +++ b/go-controller/pkg/cni/udn/primary_network_test.go @@ -3,14 +3,14 @@ package udn import ( "testing" + nadapi "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/labels" - nadapi "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" v1nadmocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/mocks/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/nad" types "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -177,6 +177,9 @@ func TestWaitForPrimaryAnnotationFn(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { g := NewWithT(t) + // needs to be set so the primary user defined networks can use ipfamilies supported by the underlying cluster + config.IPv4Mode = true + config.IPv6Mode = true nadLister := v1nadmocks.NetworkAttachmentDefinitionLister{} nadNamespaceLister := v1nadmocks.NetworkAttachmentDefinitionNamespaceLister{} nadLister.On("NetworkAttachmentDefinitions", tt.namespace).Return(&nadNamespaceLister) @@ -184,7 +187,21 @@ func TestWaitForPrimaryAnnotationFn(t *testing.T) { waitCond := func(map[string]string, string) (*util.PodAnnotation, bool) { return tt.annotationFromFn, tt.isReadyFromFn } - userDefinedPrimaryNetwork := NewPrimaryNetwork(&nadLister) + + nadController := &nad.FakeNADController{ + PrimaryNetworks: map[string]util.NetInfo{}, + } + for _, nad := range tt.nads { + nadNetwork, _ := util.ParseNADInfo(nad) + nadNetwork.SetNADs(util.GetNADName(nad.Namespace, nad.Name)) + if nadNetwork.IsPrimaryNetwork() { + if _, loaded := nadController.PrimaryNetworks[nad.Namespace]; !loaded { + nadController.PrimaryNetworks[nad.Namespace] = nadNetwork + } + } + } + + userDefinedPrimaryNetwork := NewPrimaryNetwork(nadController) obtainedAnnotation, obtainedIsReady := userDefinedPrimaryNetwork.WaitForPrimaryAnnotationFn(tt.namespace, waitCond)(tt.annotations, tt.nadName) obtainedFound := userDefinedPrimaryNetwork.Found() obtainedNetworkName := userDefinedPrimaryNetwork.NetworkName() diff --git a/go-controller/pkg/cni/utils_test.go b/go-controller/pkg/cni/utils_test.go index 312aba569b..0ec49df2c4 100644 --- a/go-controller/pkg/cni/utils_test.go +++ b/go-controller/pkg/cni/utils_test.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/stretchr/testify/mock" diff --git a/go-controller/pkg/config/config.go b/go-controller/pkg/config/config.go index f174f1c00e..dab8145de3 100644 --- a/go-controller/pkg/config/config.go +++ b/go-controller/pkg/config/config.go @@ -13,14 +13,14 @@ import ( "strings" "time" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "github.com/urfave/cli/v2" gcfg "gopkg.in/gcfg.v1" lumberjack "gopkg.in/natefinch/lumberjack.v2" + "k8s.io/apimachinery/pkg/api/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" - kexec "k8s.io/utils/exec" utilnet "k8s.io/utils/net" @@ -60,19 +60,20 @@ var ( // Default holds parsed config file parameters and command-line overrides Default = DefaultConfig{ - MTU: 1400, - ConntrackZone: 64000, - EncapType: "geneve", - EncapIP: "", - EncapPort: DefaultEncapPort, - InactivityProbe: 100000, // in Milliseconds - OpenFlowProbe: 180, // in Seconds - OfctrlWaitBeforeClear: 0, // in Milliseconds - MonitorAll: true, - OVSDBTxnTimeout: DefaultDBTxnTimeout, - LFlowCacheEnable: true, - RawClusterSubnets: "10.128.0.0/14/23", - Zone: types.OvnDefaultZone, + MTU: 1400, + ConntrackZone: 64000, + EncapType: "geneve", + EncapIP: "", + EncapPort: DefaultEncapPort, + InactivityProbe: 100000, // in Milliseconds + OpenFlowProbe: 180, // in Seconds + OfctrlWaitBeforeClear: 0, // in Milliseconds + MonitorAll: true, + OVSDBTxnTimeout: DefaultDBTxnTimeout, + LFlowCacheEnable: true, + RawClusterSubnets: "10.128.0.0/14/23", + Zone: types.OvnDefaultZone, + RawUDNAllowedDefaultServices: "default/kubernetes,kube-system/kube-dns", } // Logging holds logging-related parsed config file parameters and command-line overrides @@ -110,13 +111,14 @@ var ( // Kubernetes holds Kubernetes-related parsed config file parameters and command-line overrides Kubernetes = KubernetesConfig{ - APIServer: DefaultAPIServer, - RawServiceCIDRs: "172.16.1.0/24", - OVNConfigNamespace: "ovn-kubernetes", - HostNetworkNamespace: "", - PlatformType: "", - DNSServiceNamespace: "kube-system", - DNSServiceName: "kube-dns", + APIServer: DefaultAPIServer, + RawServiceCIDRs: "172.16.1.0/24", + OVNConfigNamespace: "ovn-kubernetes", + HostNetworkNamespace: "", + DisableRequestedChassis: false, + PlatformType: "", + DNSServiceNamespace: "kube-system", + DNSServiceName: "kube-dns", // By default, use a short lifetime length for certificates to ensure that the automatic rotation works well, // might revisit in the future to use a more sensible value CertDuration: 10 * time.Minute, @@ -280,6 +282,14 @@ type DefaultConfig struct { // Zone name to which ovnkube-node/ovnkube-controller belongs to Zone string `gcfg:"zone"` + + // RawUDNAllowedDefaultServices holds the unparsed UDNAllowedDefaultServices. Should only be + // used inside config module. 
+ RawUDNAllowedDefaultServices string `gcfg:"udn-allowed-default-services"` + + // UDNAllowedDefaultServices holds a list of namespaced names of + // default cluster network services accessible from primary user-defined networks + UDNAllowedDefaultServices []string } // LoggingConfig holds logging-related parsed config file parameters and command-line overrides @@ -346,26 +356,27 @@ type CNIConfig struct { // KubernetesConfig holds Kubernetes-related parsed config file parameters and command-line overrides type KubernetesConfig struct { - BootstrapKubeconfig string `gcfg:"bootstrap-kubeconfig"` - CertDir string `gcfg:"cert-dir"` - CertDuration time.Duration `gcfg:"cert-duration"` - Kubeconfig string `gcfg:"kubeconfig"` - CACert string `gcfg:"cacert"` - CAData []byte - APIServer string `gcfg:"apiserver"` - Token string `gcfg:"token"` - TokenFile string `gcfg:"tokenFile"` - CompatServiceCIDR string `gcfg:"service-cidr"` - RawServiceCIDRs string `gcfg:"service-cidrs"` - ServiceCIDRs []*net.IPNet - OVNConfigNamespace string `gcfg:"ovn-config-namespace"` - OVNEmptyLbEvents bool `gcfg:"ovn-empty-lb-events"` - PodIP string `gcfg:"pod-ip"` // UNUSED - RawNoHostSubnetNodes string `gcfg:"no-hostsubnet-nodes"` - NoHostSubnetNodes labels.Selector - HostNetworkNamespace string `gcfg:"host-network-namespace"` - PlatformType string `gcfg:"platform-type"` - HealthzBindAddress string `gcfg:"healthz-bind-address"` + BootstrapKubeconfig string `gcfg:"bootstrap-kubeconfig"` + CertDir string `gcfg:"cert-dir"` + CertDuration time.Duration `gcfg:"cert-duration"` + Kubeconfig string `gcfg:"kubeconfig"` + CACert string `gcfg:"cacert"` + CAData []byte + APIServer string `gcfg:"apiserver"` + Token string `gcfg:"token"` + TokenFile string `gcfg:"tokenFile"` + CompatServiceCIDR string `gcfg:"service-cidr"` + RawServiceCIDRs string `gcfg:"service-cidrs"` + ServiceCIDRs []*net.IPNet + OVNConfigNamespace string `gcfg:"ovn-config-namespace"` + OVNEmptyLbEvents bool `gcfg:"ovn-empty-lb-events"` + PodIP string `gcfg:"pod-ip"` // UNUSED + RawNoHostSubnetNodes string `gcfg:"no-hostsubnet-nodes"` + NoHostSubnetNodes labels.Selector + HostNetworkNamespace string `gcfg:"host-network-namespace"` + DisableRequestedChassis bool `gcfg:"disable-requestedchassis"` + PlatformType string `gcfg:"platform-type"` + HealthzBindAddress string `gcfg:"healthz-bind-address"` // CompatMetricsBindAddress is overridden by the corresponding option in MetricsConfig CompatMetricsBindAddress string `gcfg:"metrics-bind-address"` @@ -406,13 +417,18 @@ type OVNKubernetesFeatureConfig struct { EgressIPNodeHealthCheckPort int `gcfg:"egressip-node-healthcheck-port"` EnableMultiNetwork bool `gcfg:"enable-multi-network"` EnableNetworkSegmentation bool `gcfg:"enable-network-segmentation"` - EnableMultiNetworkPolicy bool `gcfg:"enable-multi-networkpolicy"` - EnableStatelessNetPol bool `gcfg:"enable-stateless-netpol"` - EnableInterconnect bool `gcfg:"enable-interconnect"` - EnableMultiExternalGateway bool `gcfg:"enable-multi-external-gateway"` - EnablePersistentIPs bool `gcfg:"enable-persistent-ips"` - EnableDNSNameResolver bool `gcfg:"enable-dns-name-resolver"` - EnableServiceTemplateSupport bool `gcfg:"enable-svc-template-support"` + EnableRouteAdvertisements bool `gcfg:"enable-route-advertisements"` + // This feature requires a kernel fix https://github.com/torvalds/linux/commit/7f3287db654395f9c5ddd246325ff7889f550286 + // to work on a kind cluster. This flag allows disabling it for the current CI; it will be turned on once GitHub runners have this fix. 
+ DisableUDNHostIsolation bool `gcfg:"disable-udn-host-isolation"` + EnableMultiNetworkPolicy bool `gcfg:"enable-multi-networkpolicy"` + EnableStatelessNetPol bool `gcfg:"enable-stateless-netpol"` + EnableInterconnect bool `gcfg:"enable-interconnect"` + EnableMultiExternalGateway bool `gcfg:"enable-multi-external-gateway"` + EnablePersistentIPs bool `gcfg:"enable-persistent-ips"` + EnableDNSNameResolver bool `gcfg:"enable-dns-name-resolver"` + EnableServiceTemplateSupport bool `gcfg:"enable-svc-template-support"` + EnableObservability bool `gcfg:"enable-observability"` } // GatewayMode holds the node gateway mode @@ -635,6 +651,7 @@ func PrepareTestConfig() error { HybridOverlay = savedHybridOverlay OvnKubeNode = savedOvnKubeNode ClusterManager = savedClusterManager + Kubernetes.DisableRequestedChassis = false EnableMulticast = false Default.OVSDBTxnTimeout = 5 * time.Second @@ -920,6 +937,14 @@ var CommonFlags = []cli.Flag{ Value: Default.Zone, Destination: &cliConfig.Default.Zone, }, + &cli.StringFlag{ + Name: "udn-allowed-default-services", + Usage: "a list of namespaced names of default cluster network services accessible from primary " + + "user-defined networks. If not specified, defaults to [\"default/kubernetes\", \"kube-system/kube-dns\"]. " + + "Only used when enable-network-segmentation is set", + Value: Default.RawUDNAllowedDefaultServices, + Destination: &cliConfig.Default.RawUDNAllowedDefaultServices, + }, } // MonitoringFlags capture monitoring-related options @@ -1035,12 +1060,24 @@ var OVNK8sFeatureFlags = []cli.Flag{ Destination: &cliConfig.OVNKubernetesFeature.EnableMultiNetworkPolicy, Value: OVNKubernetesFeature.EnableMultiNetworkPolicy, }, + &cli.BoolFlag{ + Name: "disable-udn-host-isolation", + Usage: "Configure to disable UDN host isolation with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.DisableUDNHostIsolation, + Value: true, + }, &cli.BoolFlag{ Name: "enable-network-segmentation", Usage: "Configure to use network segmentation feature with ovn-kubernetes.", Destination: &cliConfig.OVNKubernetesFeature.EnableNetworkSegmentation, Value: OVNKubernetesFeature.EnableNetworkSegmentation, }, + &cli.BoolFlag{ + Name: "enable-route-advertisements", + Usage: "Configure to use route advertisements feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableRouteAdvertisements, + Value: OVNKubernetesFeature.EnableRouteAdvertisements, + }, &cli.BoolFlag{ Name: "enable-stateless-netpol", Usage: "Configure to use stateless network policy feature with ovn-kubernetes.", @@ -1083,6 +1120,12 @@ var OVNK8sFeatureFlags = []cli.Flag{ Destination: &cliConfig.OVNKubernetesFeature.EnableServiceTemplateSupport, Value: OVNKubernetesFeature.EnableServiceTemplateSupport, }, + &cli.BoolFlag{ + Name: "enable-observability", + Usage: "Configure to use OVN sampling with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableObservability, + Value: OVNKubernetesFeature.EnableObservability, + }, } // K8sFlags capture Kubernetes-related options @@ -1177,6 +1220,12 @@ var K8sFlags = []cli.Flag{ Destination: &cliConfig.Kubernetes.HostNetworkNamespace, Value: Kubernetes.HostNetworkNamespace, }, + &cli.BoolFlag{ + Name: "disable-requestedchassis", + Usage: "If set to true, requested-chassis option will not be set during pod creation", + Destination: &cliConfig.Kubernetes.DisableRequestedChassis, + Value: Kubernetes.DisableRequestedChassis, + }, &cli.StringFlag{ Name: "platform-type", Usage: "The cloud provider platform type ovn-kubernetes is 
deployed on. " + @@ -1752,7 +1801,7 @@ func buildKubernetesConfig(exec kexec.Interface, cli, file *config, saPath strin // completeKubernetesConfig completes the Kubernetes config by parsing raw values // into their final form. -func completeKubernetesConfig(allSubnets *configSubnets) error { +func completeKubernetesConfig(allSubnets *ConfigSubnets) error { Kubernetes.ServiceCIDRs = []*net.IPNet{} for _, cidrString := range strings.Split(Kubernetes.RawServiceCIDRs, ",") { _, serviceCIDR, err := net.ParseCIDR(cidrString) @@ -1760,7 +1809,7 @@ func completeKubernetesConfig(allSubnets *configSubnets) error { return fmt.Errorf("kubernetes service network CIDR %q invalid: %v", cidrString, err) } Kubernetes.ServiceCIDRs = append(Kubernetes.ServiceCIDRs, serviceCIDR) - allSubnets.append(configSubnetService, serviceCIDR) + allSubnets.Append(ConfigSubnetService, serviceCIDR) } if len(Kubernetes.ServiceCIDRs) > 2 { return fmt.Errorf("kubernetes service-cidrs must contain either a single CIDR or else an IPv4/IPv6 pair") @@ -1858,7 +1907,7 @@ func buildGatewayConfig(ctx *cli.Context, cli, file *config) error { return nil } -func completeGatewayConfig(allSubnets *configSubnets, masqueradeIPs *MasqueradeIPsConfig) error { +func completeGatewayConfig(allSubnets *ConfigSubnets, masqueradeIPs *MasqueradeIPsConfig) error { // Validate v4 and v6 join subnets v4IP, v4JoinCIDR, err := net.ParseCIDR(Gateway.V4JoinSubnet) if err != nil || utilnet.IsIPv6(v4IP) { @@ -1869,8 +1918,8 @@ func completeGatewayConfig(allSubnets *configSubnets, masqueradeIPs *MasqueradeI if err != nil || !utilnet.IsIPv6(v6IP) { return fmt.Errorf("invalid gateway v6 join subnet specified, subnet: %s: error: %v", Gateway.V6JoinSubnet, err) } - allSubnets.append(configSubnetJoin, v4JoinCIDR) - allSubnets.append(configSubnetJoin, v6JoinCIDR) + allSubnets.Append(ConfigSubnetJoin, v4JoinCIDR) + allSubnets.Append(ConfigSubnetJoin, v6JoinCIDR) //validate v4 and v6 masquerade subnets v4MasqueradeIP, v4MasqueradeCIDR, err := net.ParseCIDR(Gateway.V4MasqueradeSubnet) @@ -1889,8 +1938,8 @@ func completeGatewayConfig(allSubnets *configSubnets, masqueradeIPs *MasqueradeI return fmt.Errorf("unable to allocate V6MasqueradeIPs: %s", err) } - allSubnets.append(configSubnetMasquerade, v4MasqueradeCIDR) - allSubnets.append(configSubnetMasquerade, v6MasqueradeCIDR) + allSubnets.Append(ConfigSubnetMasquerade, v4MasqueradeCIDR) + allSubnets.Append(ConfigSubnetMasquerade, v6MasqueradeCIDR) return nil } @@ -2020,7 +2069,7 @@ func buildHybridOverlayConfig(ctx *cli.Context, cli, file *config) error { // completeHybridOverlayConfig completes the HybridOverlay config by parsing raw values // into their final form. -func completeHybridOverlayConfig(allSubnets *configSubnets) error { +func completeHybridOverlayConfig(allSubnets *ConfigSubnets) error { if !HybridOverlay.Enabled || len(HybridOverlay.RawClusterSubnets) == 0 { return nil } @@ -2031,7 +2080,7 @@ func completeHybridOverlayConfig(allSubnets *configSubnets) error { return fmt.Errorf("hybrid overlay cluster subnet invalid: %v", err) } for _, subnet := range HybridOverlay.ClusterSubnets { - allSubnets.append(configSubnetHybrid, subnet.CIDR) + allSubnets.Append(ConfigSubnetHybrid, subnet.CIDR) } return nil @@ -2053,7 +2102,7 @@ func buildClusterManagerConfig(ctx *cli.Context, cli, file *config) error { // completeClusterManagerConfig completes the ClusterManager config by parsing raw values // into their final form. 
-func completeClusterManagerConfig(allSubnets *configSubnets) error { +func completeClusterManagerConfig(allSubnets *ConfigSubnets) error { // Validate v4 and v6 transit switch subnets v4IP, v4TransitCIDR, err := net.ParseCIDR(ClusterManager.V4TransitSwitchSubnet) if err != nil || utilnet.IsIPv6(v4IP) { @@ -2064,8 +2113,8 @@ func completeClusterManagerConfig(allSubnets *configSubnets) error { if err != nil || !utilnet.IsIPv6(v6IP) { return fmt.Errorf("invalid transit switch v6 subnet specified, subnet: %s: error: %v", ClusterManager.V6TransitSwitchSubnet, err) } - allSubnets.append(configSubnetTransit, v4TransitCIDR) - allSubnets.append(configSubnetTransit, v6TransitCIDR) + allSubnets.Append(ConfigSubnetTransit, v4TransitCIDR) + allSubnets.Append(ConfigSubnetTransit, v6TransitCIDR) return nil } @@ -2094,14 +2143,19 @@ func buildDefaultConfig(cli, file *config) error { // completeDefaultConfig completes the Default config by parsing raw values // into their final form. -func completeDefaultConfig(allSubnets *configSubnets) error { +func completeDefaultConfig(allSubnets *ConfigSubnets) error { var err error Default.ClusterSubnets, err = ParseClusterSubnetEntries(Default.RawClusterSubnets) if err != nil { return fmt.Errorf("cluster subnet invalid: %v", err) } for _, subnet := range Default.ClusterSubnets { - allSubnets.append(configSubnetCluster, subnet.CIDR) + allSubnets.Append(ConfigSubnetCluster, subnet.CIDR) + } + + Default.UDNAllowedDefaultServices, err = parseServicesNamespacedNames(Default.RawUDNAllowedDefaultServices) + if err != nil { + return fmt.Errorf("UDN allowed services field is invalid: %v", err) } Default.HostMasqConntrackZone = Default.ConntrackZone + 1 @@ -2111,6 +2165,27 @@ func completeDefaultConfig(allSubnets *configSubnets) error { return nil } +// parseServicesNamespacedNames splits the input string by `,` and returns a slice +// of keys that were verified to be a valid namespaced service name. It ignores spaces between the elements. 
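+// For example (illustrative input): " default/kubernetes, kube-system/kube-dns"
+// yields ["default/kubernetes", "kube-system/kube-dns"], while a bare "kubernetes"
+// is rejected because no namespace is set.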
+func parseServicesNamespacedNames(servicesRaw string) ([]string, error) { + var services []string + for _, udnEnabledSVC := range strings.Split(servicesRaw, ",") { + svcKey := strings.TrimSpace(udnEnabledSVC) + namespace, name, err := cache.SplitMetaNamespaceKey(svcKey) + if namespace == "" { + return nil, fmt.Errorf("UDN enabled service %q has no namespace set: %v", svcKey, err) + } + if errs := validation.ValidateNamespaceName(namespace, false); len(errs) != 0 { + return nil, fmt.Errorf("UDN enabled service %q has an invalid namespace: %v", svcKey, errs) + } + if errs := validation.NameIsDNSSubdomain(name, false); len(errs) != 0 { + return nil, fmt.Errorf("UDN enabled service %q has an invalid name: %v", svcKey, errs) + } + services = append(services, svcKey) + } + return services, nil +} + // getConfigFilePath returns config file path and 'true' if the config file is // the fallback path (eg not given by the user), 'false' if given explicitly // by the user @@ -2326,7 +2401,7 @@ func initConfigWithPath(ctx *cli.Context, exec kexec.Interface, saPath string, d } func completeConfig() error { - allSubnets := newConfigSubnets() + allSubnets := NewConfigSubnets() if err := completeKubernetesConfig(allSubnets); err != nil { return err @@ -2349,7 +2424,7 @@ return err } - if err := allSubnets.checkForOverlaps(); err != nil { + if err := allSubnets.CheckForOverlaps(); err != nil { return err } diff --git a/go-controller/pkg/config/config_test.go b/go-controller/pkg/config/config_test.go index f434ff9b67..6fe6d42c51 100644 --- a/go-controller/pkg/config/config_test.go +++ b/go-controller/pkg/config/config_test.go @@ -9,12 +9,13 @@ import ( "testing" "time" - ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/urfave/cli/v2" kexec "k8s.io/utils/exec" - . "github.com/onsi/ginkgo" + ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + + . 
"github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) @@ -154,6 +155,7 @@ no-hostsubnet-nodes=label=another-test-label healthz-bind-address=0.0.0.0:1234 dns-service-namespace=kube-system-f dns-service-name=kube-dns-f +disable-requestedchassis=false [metrics] bind-address=1.1.1.1:8080 @@ -225,6 +227,7 @@ egressip-node-healthcheck-port=1234 enable-multi-network=false enable-multi-networkpolicy=false enable-network-segmentation=false +enable-route-advertisements=false enable-interconnect=false enable-multi-external-gateway=false enable-admin-network-policy=false @@ -335,6 +338,7 @@ var _ = Describe("Config Operations", func() { gomega.Expect(OVNKubernetesFeature.EgressIPNodeHealthCheckPort).To(gomega.Equal(0)) gomega.Expect(OVNKubernetesFeature.EnableMultiNetwork).To(gomega.BeFalse()) gomega.Expect(OVNKubernetesFeature.EnableNetworkSegmentation).To(gomega.BeFalse()) + gomega.Expect(OVNKubernetesFeature.EnableRouteAdvertisements).To(gomega.BeFalse()) gomega.Expect(OVNKubernetesFeature.EnableMultiNetworkPolicy).To(gomega.BeFalse()) gomega.Expect(OVNKubernetesFeature.EnableInterconnect).To(gomega.BeFalse()) gomega.Expect(OVNKubernetesFeature.EnableMultiExternalGateway).To(gomega.BeFalse()) @@ -593,6 +597,7 @@ var _ = Describe("Config Operations", func() { "enable-multi-network=true", "enable-multi-networkpolicy=true", "enable-network-segmentation=true", + "enable-route-advertisements=true", "enable-interconnect=true", "enable-multi-external-gateway=true", "enable-admin-network-policy=true", @@ -682,6 +687,7 @@ var _ = Describe("Config Operations", func() { gomega.Expect(OVNKubernetesFeature.EgressIPNodeHealthCheckPort).To(gomega.Equal(1234)) gomega.Expect(OVNKubernetesFeature.EnableMultiNetwork).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableNetworkSegmentation).To(gomega.BeTrue()) + gomega.Expect(OVNKubernetesFeature.EnableRouteAdvertisements).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableInterconnect).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableMultiExternalGateway).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableAdminNetworkPolicy).To(gomega.BeTrue()) @@ -743,6 +749,7 @@ var _ = Describe("Config Operations", func() { gomega.Expect(Kubernetes.HealthzBindAddress).To(gomega.Equal("0.0.0.0:4321")) gomega.Expect(Kubernetes.DNSServiceNamespace).To(gomega.Equal("kube-system-2")) gomega.Expect(Kubernetes.DNSServiceName).To(gomega.Equal("kube-dns-2")) + gomega.Expect(Kubernetes.DisableRequestedChassis).To(gomega.BeTrue()) gomega.Expect(Default.ClusterSubnets).To(gomega.Equal([]CIDRNetworkEntry{ {ovntest.MustParseIPNet("10.130.0.0/15"), 24}, })) @@ -787,6 +794,7 @@ var _ = Describe("Config Operations", func() { gomega.Expect(OVNKubernetesFeature.EgressIPNodeHealthCheckPort).To(gomega.Equal(4321)) gomega.Expect(OVNKubernetesFeature.EnableMultiNetwork).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableNetworkSegmentation).To(gomega.BeTrue()) + gomega.Expect(OVNKubernetesFeature.EnableRouteAdvertisements).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableMultiNetworkPolicy).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableInterconnect).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableMultiExternalGateway).To(gomega.BeTrue()) @@ -861,6 +869,7 @@ var _ = Describe("Config Operations", func() { "-enable-multi-network=true", "-enable-multi-networkpolicy=true", "-enable-network-segmentation=true", + "-enable-route-advertisements=true", "-enable-interconnect=true", "-enable-multi-external-gateway=true", 
"-enable-admin-network-policy=true", @@ -869,6 +878,7 @@ var _ = Describe("Config Operations", func() { "-zone=bar", "-dns-service-namespace=kube-system-2", "-dns-service-name=kube-dns-2", + "-disable-requestedchassis=true", "-cluster-manager-v4-transit-switch-subnet=100.90.0.0/16", "-cluster-manager-v6-transit-switch-subnet=fd96::/64", } @@ -1600,6 +1610,45 @@ foo=bar gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) + It("rejects a config with invalid udn allowed services", func() { + err := ioutil.WriteFile(cfgFile.Name(), []byte(`[default] +udn-allowed-default-services=namespace/invalid.name,test +`), 0o644) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + app.Action = func(ctx *cli.Context) error { + _, err = InitConfig(ctx, kexec.New(), nil) + gomega.Expect(err).To(gomega.HaveOccurred()) + + return nil + } + cliArgs := []string{ + app.Name, + "-config-file=" + cfgFile.Name(), + } + err = app.Run(cliArgs) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + It("accepts a config with valid udn allowed services", func() { + err := ioutil.WriteFile(cfgFile.Name(), []byte(`[default] +udn-allowed-default-services= ns/svc, ns1/svc1 +`), 0o644) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + app.Action = func(ctx *cli.Context) error { + _, err = InitConfig(ctx, kexec.New(), nil) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(Default.UDNAllowedDefaultServices).To(gomega.HaveLen(2)) + return nil + } + cliArgs := []string{ + app.Name, + "-config-file=" + cfgFile.Name(), + } + err = app.Run(cliArgs) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) Describe("OvnDBAuth operations", func() { var certFile, keyFile, caFile string diff --git a/go-controller/pkg/config/utils.go b/go-controller/pkg/config/utils.go index 17cdb3027e..b27be4d7cb 100644 --- a/go-controller/pkg/config/utils.go +++ b/go-controller/pkg/config/utils.go @@ -162,67 +162,69 @@ func ParseFlowCollectors(flowCollectors string) ([]HostPort, error) { return parsedFlowsCollectors, nil } -type configSubnetType string +type ConfigSubnetType string const ( - configSubnetJoin configSubnetType = "built-in join subnet" - configSubnetCluster configSubnetType = "cluster subnet" - configSubnetService configSubnetType = "service subnet" - configSubnetHybrid configSubnetType = "hybrid overlay subnet" - configSubnetMasquerade configSubnetType = "masquerade subnet" - configSubnetTransit configSubnetType = "transit switch subnet" + ConfigSubnetJoin ConfigSubnetType = "built-in join subnet" + ConfigSubnetCluster ConfigSubnetType = "cluster subnet" + ConfigSubnetService ConfigSubnetType = "service subnet" + ConfigSubnetHybrid ConfigSubnetType = "hybrid overlay subnet" + ConfigSubnetMasquerade ConfigSubnetType = "masquerade subnet" + ConfigSubnetTransit ConfigSubnetType = "transit switch subnet" + UserDefinedSubnets ConfigSubnetType = "user defined subnet" + UserDefinedJoinSubnet ConfigSubnetType = "user defined join subnet" ) -type configSubnet struct { - subnetType configSubnetType - subnet *net.IPNet +type ConfigSubnet struct { + SubnetType ConfigSubnetType + Subnet *net.IPNet } -// configSubnets represents a set of configured subnets (and their names) -type configSubnets struct { - subnets []configSubnet - v4 map[configSubnetType]bool - v6 map[configSubnetType]bool +// ConfigSubnets represents a set of configured subnets (and their names) +type ConfigSubnets struct { + Subnets []ConfigSubnet + V4 map[ConfigSubnetType]bool + V6 map[ConfigSubnetType]bool } -// newConfigSubnets returns a new configSubnets 
-func newConfigSubnets() *configSubnets { - return &configSubnets{ - v4: make(map[configSubnetType]bool), - v6: make(map[configSubnetType]bool), +// NewConfigSubnets returns a new ConfigSubnets +func NewConfigSubnets() *ConfigSubnets { + return &ConfigSubnets{ + V4: make(map[ConfigSubnetType]bool), + V6: make(map[ConfigSubnetType]bool), } } // append adds a single subnet to cs -func (cs *configSubnets) append(subnetType configSubnetType, subnet *net.IPNet) { - cs.subnets = append(cs.subnets, configSubnet{subnetType: subnetType, subnet: subnet}) - if subnetType != configSubnetJoin && subnetType != configSubnetMasquerade && subnetType != configSubnetTransit { +func (cs *ConfigSubnets) Append(subnetType ConfigSubnetType, subnet *net.IPNet) { + cs.Subnets = append(cs.Subnets, ConfigSubnet{SubnetType: subnetType, Subnet: subnet}) + if subnetType == ConfigSubnetCluster || subnetType == ConfigSubnetService || subnetType == ConfigSubnetHybrid { if utilnet.IsIPv6CIDR(subnet) { - cs.v6[subnetType] = true + cs.V6[subnetType] = true } else { - cs.v4[subnetType] = true + cs.V4[subnetType] = true } } } -// checkForOverlaps checks if any of the subnets in cs overlap -func (cs *configSubnets) checkForOverlaps() error { - for i, si := range cs.subnets { +// CheckForOverlaps checks if any of the subnets in cs overlap +func (cs *ConfigSubnets) CheckForOverlaps() error { + for i, si := range cs.Subnets { for j := 0; j < i; j++ { - sj := cs.subnets[j] - if si.subnet.Contains(sj.subnet.IP) || sj.subnet.Contains(si.subnet.IP) { + sj := cs.Subnets[j] + if si.Subnet.Contains(sj.Subnet.IP) || sj.Subnet.Contains(si.Subnet.IP) { return fmt.Errorf("illegal network configuration: %s %q overlaps %s %q", - si.subnetType, si.subnet.String(), - sj.subnetType, sj.subnet.String()) + si.SubnetType, si.Subnet.String(), + sj.SubnetType, sj.Subnet.String()) } } } return nil } -func (cs *configSubnets) describeSubnetType(subnetType configSubnetType) string { - ipv4 := cs.v4[subnetType] - ipv6 := cs.v6[subnetType] +func (cs *ConfigSubnets) describeSubnetType(subnetType ConfigSubnetType) string { + ipv4 := cs.V4[subnetType] + ipv6 := cs.V6[subnetType] var familyType string switch { case ipv4 && !ipv6: @@ -240,45 +242,27 @@ func (cs *configSubnets) describeSubnetType(subnetType configSubnetType) string // checkIPFamilies determines if cs contains a valid single-stack IPv4 configuration, a // valid single-stack IPv6 configuration, a valid dual-stack configuration, or none of the // above. 
-func (cs *configSubnets) checkIPFamilies() (usingIPv4, usingIPv6 bool, err error) { - if len(cs.v6) == 0 { +func (cs *ConfigSubnets) checkIPFamilies() (usingIPv4, usingIPv6 bool, err error) { + if len(cs.V6) == 0 { // Single-stack IPv4 return true, false, nil - } else if len(cs.v4) == 0 { + } else if len(cs.V4) == 0 { // Single-stack IPv6 return false, true, nil - } else if reflect.DeepEqual(cs.v4, cs.v6) { + } else if reflect.DeepEqual(cs.V4, cs.V6) { // Dual-stack return true, true, nil } - netConfig := cs.describeSubnetType(configSubnetCluster) - netConfig += ", " + cs.describeSubnetType(configSubnetService) - if cs.v4[configSubnetHybrid] || cs.v6[configSubnetHybrid] { - netConfig += ", " + cs.describeSubnetType(configSubnetHybrid) + netConfig := cs.describeSubnetType(ConfigSubnetCluster) + netConfig += ", " + cs.describeSubnetType(ConfigSubnetService) + if cs.V4[ConfigSubnetHybrid] || cs.V6[ConfigSubnetHybrid] { + netConfig += ", " + cs.describeSubnetType(ConfigSubnetHybrid) } return false, false, fmt.Errorf("illegal network configuration: %s", netConfig) } -func ContainsJoinIP(ip net.IP) bool { - var joinSubnetsConfig []string - if IPv4Mode { - joinSubnetsConfig = append(joinSubnetsConfig, Gateway.V4JoinSubnet) - } - if IPv6Mode { - joinSubnetsConfig = append(joinSubnetsConfig, Gateway.V6JoinSubnet) - } - - for _, subnet := range joinSubnetsConfig { - _, joinSubnet, _ := net.ParseCIDR(subnet) - if joinSubnet.Contains(ip) { - return true - } - } - return false -} - // masqueradeIP represents the masqueradeIPs used by the masquerade subnets for host to service traffic type MasqueradeIPsConfig struct { V4OVNMasqueradeIP net.IP diff --git a/go-controller/pkg/config/utils_test.go b/go-controller/pkg/config/utils_test.go index d59f795ca3..0092dc34dd 100644 --- a/go-controller/pkg/config/utils_test.go +++ b/go-controller/pkg/config/utils_test.go @@ -300,15 +300,15 @@ func Test_checkForOverlap(t *testing.T) { } for _, tc := range tests { - allSubnets := newConfigSubnets() + allSubnets := NewConfigSubnets() for _, joinSubnet := range tc.joinSubnetCIDRList { - allSubnets.append(configSubnetJoin, joinSubnet) + allSubnets.Append(ConfigSubnetJoin, joinSubnet) } for _, subnet := range tc.cidrList { - allSubnets.append(configSubnetCluster, subnet) + allSubnets.Append(ConfigSubnetCluster, subnet) } - err := allSubnets.checkForOverlaps() + err := allSubnets.CheckForOverlaps() if err == nil && tc.shouldError { t.Errorf("testcase \"%s\" failed to find overlap", tc.name) } else if err != nil && !tc.shouldError { diff --git a/go-controller/pkg/controller/controller.go b/go-controller/pkg/controller/controller.go index 3dc7b3397a..9570c8c7c0 100644 --- a/go-controller/pkg/controller/controller.go +++ b/go-controller/pkg/controller/controller.go @@ -37,14 +37,14 @@ type Controller interface { } type ReconcilerConfig struct { - RateLimiter workqueue.RateLimiter + RateLimiter workqueue.TypedRateLimiter[string] Reconcile func(key string) error // How many workers should be started for this reconciler. Threadiness int } type ControllerConfig[T any] struct { - RateLimiter workqueue.RateLimiter + RateLimiter workqueue.TypedRateLimiter[string] Reconcile func(key string) error // How many workers should be started for this controller. 
Threadiness int @@ -62,7 +62,7 @@ type controller[T any] struct { config *ControllerConfig[T] eventHandler cache.ResourceEventHandlerRegistration - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] stopChan chan struct{} wg *sync.WaitGroup } @@ -84,9 +84,9 @@ func NewController[T any](name string, config *ControllerConfig[T]) Controller { return &controller[T]{ name: name, config: config, - queue: workqueue.NewRateLimitingQueueWithConfig( + queue: workqueue.NewTypedRateLimitingQueueWithConfig( config.RateLimiter, - workqueue.RateLimitingQueueConfig{ + workqueue.TypedRateLimitingQueueConfig[string]{ Name: name, }, ), @@ -139,7 +139,12 @@ func (c *controller[T]) startWorkers() error { } func (c *controller[T]) stop() { + // we assign stopChan to nil to signal that controller was already stopped. + if c.stopChan == nil { + return + } close(c.stopChan) + c.stopChan = nil c.cleanup() c.wg.Wait() } @@ -216,14 +221,14 @@ func (c *controller[T]) processNextQueueItem() bool { defer c.queue.Done(key) - err := c.config.Reconcile(key.(string)) + err := c.config.Reconcile(key) if err != nil { if c.queue.NumRequeues(key) < maxRetries { - klog.Infof("Controller %s: error found while processing %s: %v", c.name, key.(string), err) + klog.Infof("Controller %s: error found while processing %s: %v", c.name, key, err) c.queue.AddRateLimited(key) return true } - klog.Warningf("Controller %s: dropping %s out of the queue: %v", c.name, key.(string), err) + klog.Warningf("Controller %s: dropping %s out of the queue: %v", c.name, key, err) utilruntime.HandleError(err) } c.queue.Forget(key) diff --git a/go-controller/pkg/controller/controller_suite_test.go b/go-controller/pkg/controller/controller_suite_test.go index f3007fdd58..90bdad39ed 100644 --- a/go-controller/pkg/controller/controller_suite_test.go +++ b/go-controller/pkg/controller/controller_suite_test.go @@ -3,7 +3,7 @@ package controller import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/controller/controller_test.go b/go-controller/pkg/controller/controller_test.go index 3561b51fc8..d1c7f7be70 100644 --- a/go-controller/pkg/controller/controller_test.go +++ b/go-controller/pkg/controller/controller_test.go @@ -8,7 +8,7 @@ import ( "sync/atomic" "time" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -57,7 +57,7 @@ var _ = Describe("Level-driven controller", func() { config.Informer = coreFactory.Core().V1().Pods().Informer() config.Lister = coreFactory.Core().V1().Pods().Lister().List if config.RateLimiter == nil { - config.RateLimiter = workqueue.NewItemFastSlowRateLimiter(100*time.Millisecond, 1*time.Second, 5) + config.RateLimiter = workqueue.NewTypedItemFastSlowRateLimiter[string](100*time.Millisecond, 1*time.Second, 5) } controller = NewController[v1.Pod]("controller-name", config) @@ -91,6 +91,11 @@ var _ = Describe("Level-driven controller", func() { Stop(controller) }) + It("has idempotent Stop", func() { + startController(getDefaultConfig(), nil) + Stop(controller) + Stop(controller) + }) It("handles initial objects once", func() { namespace := util.NewNamespace(namespace1Name) pod1 := &v1.Pod{ @@ -132,7 +137,7 @@ var _ = Describe("Level-driven controller", func() { failureCounter.Add(1) return fmt.Errorf("failure") } - config.RateLimiter = workqueue.NewItemFastSlowRateLimiter(100*time.Millisecond, 1*time.Second, maxRetries) + config.RateLimiter = workqueue.NewTypedItemFastSlowRateLimiter[string](100*time.Millisecond, 1*time.Second, maxRetries) startController(config, nil, namespace, pod) Eventually(failureCounter.Load, (maxRetries+1)*100*time.Millisecond).Should(BeEquivalentTo(maxRetries)) @@ -245,14 +250,14 @@ var _ = Describe("Level-driven controllers with shared initialSync", func() { podConfig.Informer = coreFactory.Core().V1().Pods().Informer() podConfig.Lister = coreFactory.Core().V1().Pods().Lister().List if podConfig.RateLimiter == nil { - podConfig.RateLimiter = workqueue.NewItemFastSlowRateLimiter(100*time.Millisecond, 1*time.Second, 5) + podConfig.RateLimiter = workqueue.NewTypedItemFastSlowRateLimiter[string](100*time.Millisecond, 1*time.Second, 5) } podController = NewController[v1.Pod]("podController", podConfig) nsConfig.Informer = coreFactory.Core().V1().Namespaces().Informer() nsConfig.Lister = coreFactory.Core().V1().Namespaces().Lister().List if nsConfig.RateLimiter == nil { - nsConfig.RateLimiter = workqueue.NewItemFastSlowRateLimiter(100*time.Millisecond, 1*time.Second, 5) + nsConfig.RateLimiter = workqueue.NewTypedItemFastSlowRateLimiter[string](100*time.Millisecond, 1*time.Second, 5) } namespaceController = NewController[v1.Namespace]("namespaceController", nsConfig) diff --git a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/adminpolicybasedexternalroute.go b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/adminpolicybasedexternalroute.go index 4ef8a4aef2..871dfd6070 100644 --- a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/adminpolicybasedexternalroute.go +++ b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/adminpolicybasedexternalroute.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// AdminPolicyBasedExternalRouteApplyConfiguration represents an declarative configuration of the AdminPolicyBasedExternalRoute type for use +// AdminPolicyBasedExternalRouteApplyConfiguration represents a declarative configuration of the AdminPolicyBasedExternalRoute type for use // with apply. 
type AdminPolicyBasedExternalRouteApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -32,7 +32,7 @@ type AdminPolicyBasedExternalRouteApplyConfiguration struct { Status *AdminPolicyBasedRouteStatusApplyConfiguration `json:"status,omitempty"` } -// AdminPolicyBasedExternalRoute constructs an declarative configuration of the AdminPolicyBasedExternalRoute type for use with +// AdminPolicyBasedExternalRoute constructs a declarative configuration of the AdminPolicyBasedExternalRoute type for use with // apply. func AdminPolicyBasedExternalRoute(name string) *AdminPolicyBasedExternalRouteApplyConfiguration { b := &AdminPolicyBasedExternalRouteApplyConfiguration{} @@ -215,3 +215,9 @@ func (b *AdminPolicyBasedExternalRouteApplyConfiguration) WithStatus(value *Admi b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *AdminPolicyBasedExternalRouteApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/adminpolicybasedexternalroutespec.go b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/adminpolicybasedexternalroutespec.go index 9ef82d1659..af82cc3e1f 100644 --- a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/adminpolicybasedexternalroutespec.go +++ b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/adminpolicybasedexternalroutespec.go @@ -17,14 +17,14 @@ limitations under the License. package v1 -// AdminPolicyBasedExternalRouteSpecApplyConfiguration represents an declarative configuration of the AdminPolicyBasedExternalRouteSpec type for use +// AdminPolicyBasedExternalRouteSpecApplyConfiguration represents a declarative configuration of the AdminPolicyBasedExternalRouteSpec type for use // with apply. type AdminPolicyBasedExternalRouteSpecApplyConfiguration struct { From *ExternalNetworkSourceApplyConfiguration `json:"from,omitempty"` NextHops *ExternalNextHopsApplyConfiguration `json:"nextHops,omitempty"` } -// AdminPolicyBasedExternalRouteSpecApplyConfiguration constructs an declarative configuration of the AdminPolicyBasedExternalRouteSpec type for use with +// AdminPolicyBasedExternalRouteSpecApplyConfiguration constructs a declarative configuration of the AdminPolicyBasedExternalRouteSpec type for use with // apply. 
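// NOTE: the generated GetName accessor added above is what the fake
// clientset's field-managed object tracker uses to key server-side apply
// requests. A hedged sketch of the builder (WithSpec is the generated
// setter for the Spec field, assumed here rather than shown in this hunk):
//
//	ac := AdminPolicyBasedExternalRoute("my-route").
//		WithSpec(AdminPolicyBasedExternalRouteSpec())
//	name := ac.GetName() // *string pointing at "my-route"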
func AdminPolicyBasedExternalRouteSpec() *AdminPolicyBasedExternalRouteSpecApplyConfiguration { return &AdminPolicyBasedExternalRouteSpecApplyConfiguration{} diff --git a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/adminpolicybasedroutestatus.go b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/adminpolicybasedroutestatus.go index cfadb10859..3d12b9e571 100644 --- a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/adminpolicybasedroutestatus.go +++ b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/adminpolicybasedroutestatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// AdminPolicyBasedRouteStatusApplyConfiguration represents an declarative configuration of the AdminPolicyBasedRouteStatus type for use +// AdminPolicyBasedRouteStatusApplyConfiguration represents a declarative configuration of the AdminPolicyBasedRouteStatus type for use // with apply. type AdminPolicyBasedRouteStatusApplyConfiguration struct { LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` @@ -30,7 +30,7 @@ type AdminPolicyBasedRouteStatusApplyConfiguration struct { Status *adminpolicybasedroutev1.StatusType `json:"status,omitempty"` } -// AdminPolicyBasedRouteStatusApplyConfiguration constructs an declarative configuration of the AdminPolicyBasedRouteStatus type for use with +// AdminPolicyBasedRouteStatusApplyConfiguration constructs a declarative configuration of the AdminPolicyBasedRouteStatus type for use with // apply. func AdminPolicyBasedRouteStatus() *AdminPolicyBasedRouteStatusApplyConfiguration { return &AdminPolicyBasedRouteStatusApplyConfiguration{} diff --git a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/dynamichop.go b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/dynamichop.go index b233656813..ba5103b4eb 100644 --- a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/dynamichop.go +++ b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/dynamichop.go @@ -18,19 +18,19 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DynamicHopApplyConfiguration represents an declarative configuration of the DynamicHop type for use +// DynamicHopApplyConfiguration represents a declarative configuration of the DynamicHop type for use // with apply. type DynamicHopApplyConfiguration struct { - PodSelector *v1.LabelSelector `json:"podSelector,omitempty"` - NamespaceSelector *v1.LabelSelector `json:"namespaceSelector,omitempty"` - NetworkAttachmentName *string `json:"networkAttachmentName,omitempty"` - BFDEnabled *bool `json:"bfdEnabled,omitempty"` + PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` + NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + NetworkAttachmentName *string `json:"networkAttachmentName,omitempty"` + BFDEnabled *bool `json:"bfdEnabled,omitempty"` } -// DynamicHopApplyConfiguration constructs an declarative configuration of the DynamicHop type for use with +// DynamicHopApplyConfiguration constructs a declarative configuration of the DynamicHop type for use with // apply. 
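// NOTE: selector fields here change from *metav1.LabelSelector to the
// client-go apply-configuration type, so callers now build selectors with
// the k8s.io/client-go/applyconfigurations/meta/v1 helpers. A minimal
// sketch (metav1ac is an assumed import alias):
//
//	import metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
//
//	hop := DynamicHop().
//		WithNamespaceSelector(metav1ac.LabelSelector().
//			WithMatchLabels(map[string]string{"network": "external"}))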
func DynamicHop() *DynamicHopApplyConfiguration { return &DynamicHopApplyConfiguration{} @@ -39,16 +39,16 @@ func DynamicHop() *DynamicHopApplyConfiguration { // WithPodSelector sets the PodSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PodSelector field is set to the value of the last call. -func (b *DynamicHopApplyConfiguration) WithPodSelector(value v1.LabelSelector) *DynamicHopApplyConfiguration { - b.PodSelector = &value +func (b *DynamicHopApplyConfiguration) WithPodSelector(value *v1.LabelSelectorApplyConfiguration) *DynamicHopApplyConfiguration { + b.PodSelector = value return b } // WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NamespaceSelector field is set to the value of the last call. -func (b *DynamicHopApplyConfiguration) WithNamespaceSelector(value v1.LabelSelector) *DynamicHopApplyConfiguration { - b.NamespaceSelector = &value +func (b *DynamicHopApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *DynamicHopApplyConfiguration { + b.NamespaceSelector = value return b } diff --git a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/externalnetworksource.go b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/externalnetworksource.go index e4d679eab9..b1d3545575 100644 --- a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/externalnetworksource.go +++ b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/externalnetworksource.go @@ -18,16 +18,16 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ExternalNetworkSourceApplyConfiguration represents an declarative configuration of the ExternalNetworkSource type for use +// ExternalNetworkSourceApplyConfiguration represents a declarative configuration of the ExternalNetworkSource type for use // with apply. type ExternalNetworkSourceApplyConfiguration struct { - NamespaceSelector *v1.LabelSelector `json:"namespaceSelector,omitempty"` + NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` } -// ExternalNetworkSourceApplyConfiguration constructs an declarative configuration of the ExternalNetworkSource type for use with +// ExternalNetworkSourceApplyConfiguration constructs a declarative configuration of the ExternalNetworkSource type for use with // apply. func ExternalNetworkSource() *ExternalNetworkSourceApplyConfiguration { return &ExternalNetworkSourceApplyConfiguration{} @@ -36,7 +36,7 @@ func ExternalNetworkSource() *ExternalNetworkSourceApplyConfiguration { // WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NamespaceSelector field is set to the value of the last call. 
-func (b *ExternalNetworkSourceApplyConfiguration) WithNamespaceSelector(value v1.LabelSelector) *ExternalNetworkSourceApplyConfiguration { - b.NamespaceSelector = &value +func (b *ExternalNetworkSourceApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *ExternalNetworkSourceApplyConfiguration { + b.NamespaceSelector = value return b } diff --git a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/externalnexthops.go b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/externalnexthops.go index f45b003076..7534f7f21a 100644 --- a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/externalnexthops.go +++ b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/externalnexthops.go @@ -21,14 +21,14 @@ import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1" ) -// ExternalNextHopsApplyConfiguration represents an declarative configuration of the ExternalNextHops type for use +// ExternalNextHopsApplyConfiguration represents a declarative configuration of the ExternalNextHops type for use // with apply. type ExternalNextHopsApplyConfiguration struct { StaticHops []*v1.StaticHop `json:"static,omitempty"` DynamicHops []*v1.DynamicHop `json:"dynamic,omitempty"` } -// ExternalNextHopsApplyConfiguration constructs an declarative configuration of the ExternalNextHops type for use with +// ExternalNextHopsApplyConfiguration constructs a declarative configuration of the ExternalNextHops type for use with // apply. func ExternalNextHops() *ExternalNextHopsApplyConfiguration { return &ExternalNextHopsApplyConfiguration{} diff --git a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/statichop.go b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/statichop.go index a6eed61626..bb4c61a31f 100644 --- a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/statichop.go +++ b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1/statichop.go @@ -17,14 +17,14 @@ limitations under the License. package v1 -// StaticHopApplyConfiguration represents an declarative configuration of the StaticHop type for use +// StaticHopApplyConfiguration represents a declarative configuration of the StaticHop type for use // with apply. type StaticHopApplyConfiguration struct { IP *string `json:"ip,omitempty"` BFDEnabled *bool `json:"bfdEnabled,omitempty"` } -// StaticHopApplyConfiguration constructs an declarative configuration of the StaticHop type for use with +// StaticHopApplyConfiguration constructs a declarative configuration of the StaticHop type for use with // apply. 
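// NOTE: a short, hedged sketch of the static-hop builder; WithIP and
// WithBFDEnabled are the standard generated setters for the two fields
// above (assumed, not shown in this hunk):
//
//	hop := StaticHop().WithIP("172.18.0.10").WithBFDEnabled(true)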
func StaticHop() *StaticHopApplyConfiguration { return &StaticHopApplyConfiguration{} diff --git a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/utils.go b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/utils.go index cd3301cea1..b108107122 100644 --- a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/utils.go +++ b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/utils.go @@ -20,7 +20,10 @@ package applyconfiguration import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1" adminpolicybasedroutev1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1" + internal "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/internal" + runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" ) // ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no @@ -46,3 +49,7 @@ func ForKind(kind schema.GroupVersionKind) interface{} { } return nil } + +func NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter { + return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()} +} diff --git a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/fake/clientset_generated.go b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/fake/clientset_generated.go index b7b1d957c8..b38aab721d 100644 --- a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/fake/clientset_generated.go +++ b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/fake/clientset_generated.go @@ -18,6 +18,7 @@ limitations under the License. package fake import ( + applyconfiguration "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration" clientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned" k8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/typed/adminpolicybasedroute/v1" fakek8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/typed/adminpolicybasedroute/v1/fake" @@ -30,8 +31,12 @@ import ( // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { @@ -73,6 +78,38 @@ func (c *Clientset) Tracker() testing.ObjectTracker { return c.tracker } +// NewClientset returns a clientset that will respond with the provided objects. 
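// (The tracker behind NewClientset is built from the NewTypeConverter
// helper added above; testing.NewFieldManagedObjectTracker needs a
// TypeConverter so it can turn apply configurations into typed objects
// and record per-manager field ownership. The two calls pair up as:
//
//	converter := applyconfiguration.NewTypeConverter(scheme)
//	tracker := testing.NewFieldManagedObjectTracker(
//		scheme, codecs.UniversalDecoder(), converter)
//
// which is what the constructor below does.)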
+// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewClientset(objects ...runtime.Object) *Clientset { + o := testing.NewFieldManagedObjectTracker( + scheme, + codecs.UniversalDecoder(), + applyconfiguration.NewTypeConverter(scheme), + ) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + var ( _ clientset.Interface = &Clientset{} _ testing.FakeClient = &Clientset{} diff --git a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/typed/adminpolicybasedroute/v1/adminpolicybasedexternalroute.go b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/typed/adminpolicybasedroute/v1/adminpolicybasedexternalroute.go index b95bea10bf..0058d77877 100644 --- a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/typed/adminpolicybasedroute/v1/adminpolicybasedexternalroute.go +++ b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/typed/adminpolicybasedroute/v1/adminpolicybasedexternalroute.go @@ -19,9 +19,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1" adminpolicybasedroutev1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1" @@ -29,7 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // AdminPolicyBasedExternalRoutesGetter has a method to return a AdminPolicyBasedExternalRouteInterface. @@ -42,6 +39,7 @@ type AdminPolicyBasedExternalRoutesGetter interface { type AdminPolicyBasedExternalRouteInterface interface { Create(ctx context.Context, adminPolicyBasedExternalRoute *v1.AdminPolicyBasedExternalRoute, opts metav1.CreateOptions) (*v1.AdminPolicyBasedExternalRoute, error) Update(ctx context.Context, adminPolicyBasedExternalRoute *v1.AdminPolicyBasedExternalRoute, opts metav1.UpdateOptions) (*v1.AdminPolicyBasedExternalRoute, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
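// NOTE: those generator hints refer to markers on the API type itself;
// dropping the status methods would look like this on the CRD type:
//
//	// +genclient
//	// +genclient:noStatus
//	type AdminPolicyBasedExternalRoute struct {
//		...
//	}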
UpdateStatus(ctx context.Context, adminPolicyBasedExternalRoute *v1.AdminPolicyBasedExternalRoute, opts metav1.UpdateOptions) (*v1.AdminPolicyBasedExternalRoute, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -50,193 +48,25 @@ type AdminPolicyBasedExternalRouteInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.AdminPolicyBasedExternalRoute, err error) Apply(ctx context.Context, adminPolicyBasedExternalRoute *adminpolicybasedroutev1.AdminPolicyBasedExternalRouteApplyConfiguration, opts metav1.ApplyOptions) (result *v1.AdminPolicyBasedExternalRoute, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, adminPolicyBasedExternalRoute *adminpolicybasedroutev1.AdminPolicyBasedExternalRouteApplyConfiguration, opts metav1.ApplyOptions) (result *v1.AdminPolicyBasedExternalRoute, err error) AdminPolicyBasedExternalRouteExpansion } // adminPolicyBasedExternalRoutes implements AdminPolicyBasedExternalRouteInterface type adminPolicyBasedExternalRoutes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.AdminPolicyBasedExternalRoute, *v1.AdminPolicyBasedExternalRouteList, *adminpolicybasedroutev1.AdminPolicyBasedExternalRouteApplyConfiguration] } // newAdminPolicyBasedExternalRoutes returns a AdminPolicyBasedExternalRoutes func newAdminPolicyBasedExternalRoutes(c *K8sV1Client) *adminPolicyBasedExternalRoutes { return &adminPolicyBasedExternalRoutes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.AdminPolicyBasedExternalRoute, *v1.AdminPolicyBasedExternalRouteList, *adminpolicybasedroutev1.AdminPolicyBasedExternalRouteApplyConfiguration]( + "adminpolicybasedexternalroutes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.AdminPolicyBasedExternalRoute { return &v1.AdminPolicyBasedExternalRoute{} }, + func() *v1.AdminPolicyBasedExternalRouteList { return &v1.AdminPolicyBasedExternalRouteList{} }), } } - -// Get takes name of the adminPolicyBasedExternalRoute, and returns the corresponding adminPolicyBasedExternalRoute object, and an error if there is any. -func (c *adminPolicyBasedExternalRoutes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.AdminPolicyBasedExternalRoute, err error) { - result = &v1.AdminPolicyBasedExternalRoute{} - err = c.client.Get(). - Resource("adminpolicybasedexternalroutes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of AdminPolicyBasedExternalRoutes that match those selectors. -func (c *adminPolicyBasedExternalRoutes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.AdminPolicyBasedExternalRouteList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.AdminPolicyBasedExternalRouteList{} - err = c.client.Get(). - Resource("adminpolicybasedexternalroutes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested adminPolicyBasedExternalRoutes. 
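// NOTE: everything removed from here down is hand-written REST plumbing
// that gentype.ClientWithListAndApply now provides generically from the
// resource name, parameter codec, namespace and empty-object constructors
// passed to it above. Call sites do not change, e.g.:
//
//	route, err := client.K8sV1().AdminPolicyBasedExternalRoutes().
//		Get(ctx, "my-route", metav1.GetOptions{})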
-func (c *adminPolicyBasedExternalRoutes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("adminpolicybasedexternalroutes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a adminPolicyBasedExternalRoute and creates it. Returns the server's representation of the adminPolicyBasedExternalRoute, and an error, if there is any. -func (c *adminPolicyBasedExternalRoutes) Create(ctx context.Context, adminPolicyBasedExternalRoute *v1.AdminPolicyBasedExternalRoute, opts metav1.CreateOptions) (result *v1.AdminPolicyBasedExternalRoute, err error) { - result = &v1.AdminPolicyBasedExternalRoute{} - err = c.client.Post(). - Resource("adminpolicybasedexternalroutes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(adminPolicyBasedExternalRoute). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a adminPolicyBasedExternalRoute and updates it. Returns the server's representation of the adminPolicyBasedExternalRoute, and an error, if there is any. -func (c *adminPolicyBasedExternalRoutes) Update(ctx context.Context, adminPolicyBasedExternalRoute *v1.AdminPolicyBasedExternalRoute, opts metav1.UpdateOptions) (result *v1.AdminPolicyBasedExternalRoute, err error) { - result = &v1.AdminPolicyBasedExternalRoute{} - err = c.client.Put(). - Resource("adminpolicybasedexternalroutes"). - Name(adminPolicyBasedExternalRoute.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(adminPolicyBasedExternalRoute). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *adminPolicyBasedExternalRoutes) UpdateStatus(ctx context.Context, adminPolicyBasedExternalRoute *v1.AdminPolicyBasedExternalRoute, opts metav1.UpdateOptions) (result *v1.AdminPolicyBasedExternalRoute, err error) { - result = &v1.AdminPolicyBasedExternalRoute{} - err = c.client.Put(). - Resource("adminpolicybasedexternalroutes"). - Name(adminPolicyBasedExternalRoute.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(adminPolicyBasedExternalRoute). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the adminPolicyBasedExternalRoute and deletes it. Returns an error if one occurs. -func (c *adminPolicyBasedExternalRoutes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("adminpolicybasedexternalroutes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *adminPolicyBasedExternalRoutes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("adminpolicybasedexternalroutes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched adminPolicyBasedExternalRoute. 
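// NOTE: the removed Apply and ApplyStatus bodies below are a good record
// of what server-side apply does on the wire: marshal the apply
// configuration and PATCH it with types.ApplyPatchType. gentype keeps the
// same behavior, and callers still have to identify themselves:
//
//	opts := metav1.ApplyOptions{FieldManager: "my-controller", Force: true}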
-func (c *adminPolicyBasedExternalRoutes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.AdminPolicyBasedExternalRoute, err error) { - result = &v1.AdminPolicyBasedExternalRoute{} - err = c.client.Patch(pt). - Resource("adminpolicybasedexternalroutes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied adminPolicyBasedExternalRoute. -func (c *adminPolicyBasedExternalRoutes) Apply(ctx context.Context, adminPolicyBasedExternalRoute *adminpolicybasedroutev1.AdminPolicyBasedExternalRouteApplyConfiguration, opts metav1.ApplyOptions) (result *v1.AdminPolicyBasedExternalRoute, err error) { - if adminPolicyBasedExternalRoute == nil { - return nil, fmt.Errorf("adminPolicyBasedExternalRoute provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(adminPolicyBasedExternalRoute) - if err != nil { - return nil, err - } - name := adminPolicyBasedExternalRoute.Name - if name == nil { - return nil, fmt.Errorf("adminPolicyBasedExternalRoute.Name must be provided to Apply") - } - result = &v1.AdminPolicyBasedExternalRoute{} - err = c.client.Patch(types.ApplyPatchType). - Resource("adminpolicybasedexternalroutes"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *adminPolicyBasedExternalRoutes) ApplyStatus(ctx context.Context, adminPolicyBasedExternalRoute *adminpolicybasedroutev1.AdminPolicyBasedExternalRouteApplyConfiguration, opts metav1.ApplyOptions) (result *v1.AdminPolicyBasedExternalRoute, err error) { - if adminPolicyBasedExternalRoute == nil { - return nil, fmt.Errorf("adminPolicyBasedExternalRoute provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(adminPolicyBasedExternalRoute) - if err != nil { - return nil, err - } - - name := adminPolicyBasedExternalRoute.Name - if name == nil { - return nil, fmt.Errorf("adminPolicyBasedExternalRoute.Name must be provided to Apply") - } - - result = &v1.AdminPolicyBasedExternalRoute{} - err = c.client.Patch(types.ApplyPatchType). - Resource("adminpolicybasedexternalroutes"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/typed/adminpolicybasedroute/v1/fake/fake_adminpolicybasedexternalroute.go b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/typed/adminpolicybasedroute/v1/fake/fake_adminpolicybasedexternalroute.go index 2f9a638a88..b0c2001bee 100644 --- a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/typed/adminpolicybasedroute/v1/fake/fake_adminpolicybasedexternalroute.go +++ b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/typed/adminpolicybasedroute/v1/fake/fake_adminpolicybasedexternalroute.go @@ -42,20 +42,22 @@ var adminpolicybasedexternalroutesKind = v1.SchemeGroupVersion.WithKind("AdminPo // Get takes name of the adminPolicyBasedExternalRoute, and returns the corresponding adminPolicyBasedExternalRoute object, and an error if there is any. func (c *FakeAdminPolicyBasedExternalRoutes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.AdminPolicyBasedExternalRoute, err error) { + emptyResult := &v1.AdminPolicyBasedExternalRoute{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(adminpolicybasedexternalroutesResource, name), &v1.AdminPolicyBasedExternalRoute{}) + Invokes(testing.NewRootGetActionWithOptions(adminpolicybasedexternalroutesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.AdminPolicyBasedExternalRoute), err } // List takes label and field selectors, and returns the list of AdminPolicyBasedExternalRoutes that match those selectors. func (c *FakeAdminPolicyBasedExternalRoutes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.AdminPolicyBasedExternalRouteList, err error) { + emptyResult := &v1.AdminPolicyBasedExternalRouteList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(adminpolicybasedexternalroutesResource, adminpolicybasedexternalroutesKind, opts), &v1.AdminPolicyBasedExternalRouteList{}) + Invokes(testing.NewRootListActionWithOptions(adminpolicybasedexternalroutesResource, adminpolicybasedexternalroutesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -74,36 +76,39 @@ func (c *FakeAdminPolicyBasedExternalRoutes) List(ctx context.Context, opts meta // Watch returns a watch.Interface that watches the requested adminPolicyBasedExternalRoutes. func (c *FakeAdminPolicyBasedExternalRoutes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(adminpolicybasedexternalroutesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(adminpolicybasedexternalroutesResource, opts)) } // Create takes the representation of a adminPolicyBasedExternalRoute and creates it. Returns the server's representation of the adminPolicyBasedExternalRoute, and an error, if there is any. func (c *FakeAdminPolicyBasedExternalRoutes) Create(ctx context.Context, adminPolicyBasedExternalRoute *v1.AdminPolicyBasedExternalRoute, opts metav1.CreateOptions) (result *v1.AdminPolicyBasedExternalRoute, err error) { + emptyResult := &v1.AdminPolicyBasedExternalRoute{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(adminpolicybasedexternalroutesResource, adminPolicyBasedExternalRoute), &v1.AdminPolicyBasedExternalRoute{}) + Invokes(testing.NewRootCreateActionWithOptions(adminpolicybasedexternalroutesResource, adminPolicyBasedExternalRoute, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.AdminPolicyBasedExternalRoute), err } // Update takes the representation of a adminPolicyBasedExternalRoute and updates it. Returns the server's representation of the adminPolicyBasedExternalRoute, and an error, if there is any. func (c *FakeAdminPolicyBasedExternalRoutes) Update(ctx context.Context, adminPolicyBasedExternalRoute *v1.AdminPolicyBasedExternalRoute, opts metav1.UpdateOptions) (result *v1.AdminPolicyBasedExternalRoute, err error) { + emptyResult := &v1.AdminPolicyBasedExternalRoute{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(adminpolicybasedexternalroutesResource, adminPolicyBasedExternalRoute), &v1.AdminPolicyBasedExternalRoute{}) + Invokes(testing.NewRootUpdateActionWithOptions(adminpolicybasedexternalroutesResource, adminPolicyBasedExternalRoute, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.AdminPolicyBasedExternalRoute), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeAdminPolicyBasedExternalRoutes) UpdateStatus(ctx context.Context, adminPolicyBasedExternalRoute *v1.AdminPolicyBasedExternalRoute, opts metav1.UpdateOptions) (*v1.AdminPolicyBasedExternalRoute, error) { +func (c *FakeAdminPolicyBasedExternalRoutes) UpdateStatus(ctx context.Context, adminPolicyBasedExternalRoute *v1.AdminPolicyBasedExternalRoute, opts metav1.UpdateOptions) (result *v1.AdminPolicyBasedExternalRoute, err error) { + emptyResult := &v1.AdminPolicyBasedExternalRoute{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(adminpolicybasedexternalroutesResource, "status", adminPolicyBasedExternalRoute), &v1.AdminPolicyBasedExternalRoute{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(adminpolicybasedexternalroutesResource, "status", adminPolicyBasedExternalRoute, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.AdminPolicyBasedExternalRoute), err } @@ -117,7 +122,7 @@ func (c *FakeAdminPolicyBasedExternalRoutes) Delete(ctx context.Context, name st // DeleteCollection deletes a collection of objects. func (c *FakeAdminPolicyBasedExternalRoutes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(adminpolicybasedexternalroutesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(adminpolicybasedexternalroutesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.AdminPolicyBasedExternalRouteList{}) return err @@ -125,10 +130,11 @@ func (c *FakeAdminPolicyBasedExternalRoutes) DeleteCollection(ctx context.Contex // Patch applies the patch and returns the patched adminPolicyBasedExternalRoute. func (c *FakeAdminPolicyBasedExternalRoutes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.AdminPolicyBasedExternalRoute, err error) { + emptyResult := &v1.AdminPolicyBasedExternalRoute{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(adminpolicybasedexternalroutesResource, name, pt, data, subresources...), &v1.AdminPolicyBasedExternalRoute{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(adminpolicybasedexternalroutesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.AdminPolicyBasedExternalRoute), err } @@ -146,10 +152,11 @@ func (c *FakeAdminPolicyBasedExternalRoutes) Apply(ctx context.Context, adminPol if name == nil { return nil, fmt.Errorf("adminPolicyBasedExternalRoute.Name must be provided to Apply") } + emptyResult := &v1.AdminPolicyBasedExternalRoute{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(adminpolicybasedexternalroutesResource, *name, types.ApplyPatchType, data), &v1.AdminPolicyBasedExternalRoute{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(adminpolicybasedexternalroutesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.AdminPolicyBasedExternalRoute), err } @@ -168,10 +175,11 @@ func (c *FakeAdminPolicyBasedExternalRoutes) ApplyStatus(ctx context.Context, ad if name == nil { return nil, fmt.Errorf("adminPolicyBasedExternalRoute.Name must be provided to Apply") } + emptyResult := &v1.AdminPolicyBasedExternalRoute{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(adminpolicybasedexternalroutesResource, *name, types.ApplyPatchType, data, "status"), &v1.AdminPolicyBasedExternalRoute{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(adminpolicybasedexternalroutesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.AdminPolicyBasedExternalRoute), err } diff --git a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go index 5d77d5d913..559a5a1ebc 100644 --- a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go +++ b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go @@ -227,6 +227,7 @@ type SharedInformerFactory interface { // Start initializes all requested informers. They are handled in goroutines // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. Start(stopCh <-chan struct{}) // Shutdown marks a factory as shutting down. 
At that point no new diff --git a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/listers/adminpolicybasedroute/v1/adminpolicybasedexternalroute.go b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/listers/adminpolicybasedroute/v1/adminpolicybasedexternalroute.go index d126826372..b0eaf39fc5 100644 --- a/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/listers/adminpolicybasedroute/v1/adminpolicybasedexternalroute.go +++ b/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/listers/adminpolicybasedroute/v1/adminpolicybasedexternalroute.go @@ -19,8 +19,8 @@ package v1 import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,30 +38,10 @@ type AdminPolicyBasedExternalRouteLister interface { // adminPolicyBasedExternalRouteLister implements the AdminPolicyBasedExternalRouteLister interface. type adminPolicyBasedExternalRouteLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.AdminPolicyBasedExternalRoute] } // NewAdminPolicyBasedExternalRouteLister returns a new AdminPolicyBasedExternalRouteLister. func NewAdminPolicyBasedExternalRouteLister(indexer cache.Indexer) AdminPolicyBasedExternalRouteLister { - return &adminPolicyBasedExternalRouteLister{indexer: indexer} -} - -// List lists all AdminPolicyBasedExternalRoutes in the indexer. -func (s *adminPolicyBasedExternalRouteLister) List(selector labels.Selector) (ret []*v1.AdminPolicyBasedExternalRoute, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.AdminPolicyBasedExternalRoute)) - }) - return ret, err -} - -// Get retrieves the AdminPolicyBasedExternalRoute from the index for a given name. -func (s *adminPolicyBasedExternalRouteLister) Get(name string) (*v1.AdminPolicyBasedExternalRoute, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("adminpolicybasedexternalroute"), name) - } - return obj.(*v1.AdminPolicyBasedExternalRoute), nil + return &adminPolicyBasedExternalRouteLister{listers.New[*v1.AdminPolicyBasedExternalRoute](indexer, v1.Resource("adminpolicybasedexternalroute"))} } diff --git a/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewall.go b/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewall.go index aa6e2e5867..70b65093a0 100644 --- a/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewall.go +++ b/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewall.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EgressFirewallApplyConfiguration represents an declarative configuration of the EgressFirewall type for use +// EgressFirewallApplyConfiguration represents a declarative configuration of the EgressFirewall type for use // with apply. type EgressFirewallApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -32,7 +32,7 @@ type EgressFirewallApplyConfiguration struct { Status *EgressFirewallStatusApplyConfiguration `json:"status,omitempty"` } -// EgressFirewall constructs an declarative configuration of the EgressFirewall type for use with +// EgressFirewall constructs a declarative configuration of the EgressFirewall type for use with // apply. 
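// NOTE (on the informer factory warning added above): Start returns
// immediately, so launching it in a goroutine can race with a later
// WaitForCacheSync. The safe ordering is sequential:
//
//	factory.Start(stopCh)
//	for typ, synced := range factory.WaitForCacheSync(stopCh) {
//		if !synced {
//			klog.Fatalf("informer cache for %v failed to sync", typ)
//		}
//	}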
func EgressFirewall(name, namespace string) *EgressFirewallApplyConfiguration { b := &EgressFirewallApplyConfiguration{} @@ -216,3 +216,9 @@ func (b *EgressFirewallApplyConfiguration) WithStatus(value *EgressFirewallStatu b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EgressFirewallApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewalldestination.go b/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewalldestination.go index 368660d852..31d256bf84 100644 --- a/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewalldestination.go +++ b/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewalldestination.go @@ -18,18 +18,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EgressFirewallDestinationApplyConfiguration represents an declarative configuration of the EgressFirewallDestination type for use +// EgressFirewallDestinationApplyConfiguration represents a declarative configuration of the EgressFirewallDestination type for use // with apply. type EgressFirewallDestinationApplyConfiguration struct { - CIDRSelector *string `json:"cidrSelector,omitempty"` - DNSName *string `json:"dnsName,omitempty"` - NodeSelector *v1.LabelSelector `json:"nodeSelector,omitempty"` + CIDRSelector *string `json:"cidrSelector,omitempty"` + DNSName *string `json:"dnsName,omitempty"` + NodeSelector *v1.LabelSelectorApplyConfiguration `json:"nodeSelector,omitempty"` } -// EgressFirewallDestinationApplyConfiguration constructs an declarative configuration of the EgressFirewallDestination type for use with +// EgressFirewallDestinationApplyConfiguration constructs a declarative configuration of the EgressFirewallDestination type for use with // apply. func EgressFirewallDestination() *EgressFirewallDestinationApplyConfiguration { return &EgressFirewallDestinationApplyConfiguration{} @@ -54,7 +54,7 @@ func (b *EgressFirewallDestinationApplyConfiguration) WithDNSName(value string) // WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NodeSelector field is set to the value of the last call. -func (b *EgressFirewallDestinationApplyConfiguration) WithNodeSelector(value v1.LabelSelector) *EgressFirewallDestinationApplyConfiguration { - b.NodeSelector = &value +func (b *EgressFirewallDestinationApplyConfiguration) WithNodeSelector(value *v1.LabelSelectorApplyConfiguration) *EgressFirewallDestinationApplyConfiguration { + b.NodeSelector = value return b } diff --git a/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewallport.go b/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewallport.go index 23215eac64..634f9d2af0 100644 --- a/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewallport.go +++ b/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewallport.go @@ -17,14 +17,14 @@ limitations under the License. 
package v1 -// EgressFirewallPortApplyConfiguration represents an declarative configuration of the EgressFirewallPort type for use +// EgressFirewallPortApplyConfiguration represents a declarative configuration of the EgressFirewallPort type for use // with apply. type EgressFirewallPortApplyConfiguration struct { Protocol *string `json:"protocol,omitempty"` Port *int32 `json:"port,omitempty"` } -// EgressFirewallPortApplyConfiguration constructs an declarative configuration of the EgressFirewallPort type for use with +// EgressFirewallPortApplyConfiguration constructs a declarative configuration of the EgressFirewallPort type for use with // apply. func EgressFirewallPort() *EgressFirewallPortApplyConfiguration { return &EgressFirewallPortApplyConfiguration{} diff --git a/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewallrule.go b/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewallrule.go index f99b08482e..d6153c26ba 100644 --- a/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewallrule.go +++ b/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewallrule.go @@ -21,7 +21,7 @@ import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1" ) -// EgressFirewallRuleApplyConfiguration represents an declarative configuration of the EgressFirewallRule type for use +// EgressFirewallRuleApplyConfiguration represents a declarative configuration of the EgressFirewallRule type for use // with apply. type EgressFirewallRuleApplyConfiguration struct { Type *v1.EgressFirewallRuleType `json:"type,omitempty"` @@ -29,7 +29,7 @@ type EgressFirewallRuleApplyConfiguration struct { To *EgressFirewallDestinationApplyConfiguration `json:"to,omitempty"` } -// EgressFirewallRuleApplyConfiguration constructs an declarative configuration of the EgressFirewallRule type for use with +// EgressFirewallRuleApplyConfiguration constructs a declarative configuration of the EgressFirewallRule type for use with // apply. func EgressFirewallRule() *EgressFirewallRuleApplyConfiguration { return &EgressFirewallRuleApplyConfiguration{} diff --git a/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewallspec.go b/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewallspec.go index 8586763a4d..2c6bc2546b 100644 --- a/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewallspec.go +++ b/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewallspec.go @@ -17,13 +17,13 @@ limitations under the License. package v1 -// EgressFirewallSpecApplyConfiguration represents an declarative configuration of the EgressFirewallSpec type for use +// EgressFirewallSpecApplyConfiguration represents a declarative configuration of the EgressFirewallSpec type for use // with apply. type EgressFirewallSpecApplyConfiguration struct { Egress []EgressFirewallRuleApplyConfiguration `json:"egress,omitempty"` } -// EgressFirewallSpecApplyConfiguration constructs an declarative configuration of the EgressFirewallSpec type for use with +// EgressFirewallSpecApplyConfiguration constructs a declarative configuration of the EgressFirewallSpec type for use with // apply. 
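// NOTE: a hedged sketch of composing a rule from these builders; WithType,
// WithPorts, WithTo, WithProtocol, WithPort, WithCIDRSelector and
// WithEgress are the standard generated setters for the fields above
// (assumed, not all shown in these hunks):
//
//	rule := EgressFirewallRule().
//		WithType(v1.EgressFirewallRuleType("Allow")).
//		WithPorts(EgressFirewallPort().WithProtocol("TCP").WithPort(443)).
//		WithTo(EgressFirewallDestination().WithCIDRSelector("8.8.8.8/32"))
//	spec := EgressFirewallSpec().WithEgress(rule)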
func EgressFirewallSpec() *EgressFirewallSpecApplyConfiguration { return &EgressFirewallSpecApplyConfiguration{} diff --git a/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewallstatus.go b/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewallstatus.go index 154aa7e5f4..48b847bdd5 100644 --- a/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewallstatus.go +++ b/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1/egressfirewallstatus.go @@ -17,14 +17,14 @@ limitations under the License. package v1 -// EgressFirewallStatusApplyConfiguration represents an declarative configuration of the EgressFirewallStatus type for use +// EgressFirewallStatusApplyConfiguration represents a declarative configuration of the EgressFirewallStatus type for use // with apply. type EgressFirewallStatusApplyConfiguration struct { Status *string `json:"status,omitempty"` Messages []string `json:"messages,omitempty"` } -// EgressFirewallStatusApplyConfiguration constructs an declarative configuration of the EgressFirewallStatus type for use with +// EgressFirewallStatusApplyConfiguration constructs a declarative configuration of the EgressFirewallStatus type for use with // apply. func EgressFirewallStatus() *EgressFirewallStatusApplyConfiguration { return &EgressFirewallStatusApplyConfiguration{} diff --git a/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/utils.go b/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/utils.go index 2faef5ad04..26ed724264 100644 --- a/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/utils.go +++ b/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/utils.go @@ -20,7 +20,10 @@ package applyconfiguration import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1" egressfirewallv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1" + internal "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/internal" + runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" ) // ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no @@ -44,3 +47,7 @@ func ForKind(kind schema.GroupVersionKind) interface{} { } return nil } + +func NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter { + return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()} +} diff --git a/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/fake/clientset_generated.go b/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/fake/clientset_generated.go index 59ffb7d76f..050b373585 100644 --- a/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/fake/clientset_generated.go +++ b/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/fake/clientset_generated.go @@ -18,6 +18,7 @@ limitations under the License. 
package fake import ( + applyconfiguration "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration" clientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned" k8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/typed/egressfirewall/v1" fakek8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/typed/egressfirewall/v1/fake" @@ -30,8 +31,12 @@ import ( // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { @@ -73,6 +78,38 @@ func (c *Clientset) Tracker() testing.ObjectTracker { return c.tracker } +// NewClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. 
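// A hedged unit-test sketch using the new constructor (egressfirewallv1ac
// is an assumed alias for the generated applyconfiguration package, and
// WithSpec is the assumed generated setter): with the field-managed
// tracker, repeated Apply calls from the same field manager behave like
// real server-side apply instead of a blind create-or-update:
//
//	cs := NewClientset()
//	ac := egressfirewallv1ac.EgressFirewall("default", "test-ns").
//		WithSpec(egressfirewallv1ac.EgressFirewallSpec())
//	ef, err := cs.K8sV1().EgressFirewalls("test-ns").
//		Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "test"})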
+func NewClientset(objects ...runtime.Object) *Clientset { + o := testing.NewFieldManagedObjectTracker( + scheme, + codecs.UniversalDecoder(), + applyconfiguration.NewTypeConverter(scheme), + ) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + var ( _ clientset.Interface = &Clientset{} _ testing.FakeClient = &Clientset{} diff --git a/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/typed/egressfirewall/v1/egressfirewall.go b/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/typed/egressfirewall/v1/egressfirewall.go index c7291eab6c..6c07b5b976 100644 --- a/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/typed/egressfirewall/v1/egressfirewall.go +++ b/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/typed/egressfirewall/v1/egressfirewall.go @@ -19,9 +19,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1" egressfirewallv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/applyconfiguration/egressfirewall/v1" @@ -29,7 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // EgressFirewallsGetter has a method to return a EgressFirewallInterface. @@ -42,6 +39,7 @@ type EgressFirewallsGetter interface { type EgressFirewallInterface interface { Create(ctx context.Context, egressFirewall *v1.EgressFirewall, opts metav1.CreateOptions) (*v1.EgressFirewall, error) Update(ctx context.Context, egressFirewall *v1.EgressFirewall, opts metav1.UpdateOptions) (*v1.EgressFirewall, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, egressFirewall *v1.EgressFirewall, opts metav1.UpdateOptions) (*v1.EgressFirewall, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -50,206 +48,25 @@ type EgressFirewallInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EgressFirewall, err error) Apply(ctx context.Context, egressFirewall *egressfirewallv1.EgressFirewallApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EgressFirewall, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, egressFirewall *egressfirewallv1.EgressFirewallApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EgressFirewall, err error) EgressFirewallExpansion } // egressFirewalls implements EgressFirewallInterface type egressFirewalls struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.EgressFirewall, *v1.EgressFirewallList, *egressfirewallv1.EgressFirewallApplyConfiguration] } // newEgressFirewalls returns a EgressFirewalls func newEgressFirewalls(c *K8sV1Client, namespace string) *egressFirewalls { return &egressFirewalls{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.EgressFirewall, *v1.EgressFirewallList, *egressfirewallv1.EgressFirewallApplyConfiguration]( + "egressfirewalls", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.EgressFirewall { return &v1.EgressFirewall{} }, + func() *v1.EgressFirewallList { return &v1.EgressFirewallList{} }), } } - -// Get takes name of the egressFirewall, and returns the corresponding egressFirewall object, and an error if there is any. -func (c *egressFirewalls) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EgressFirewall, err error) { - result = &v1.EgressFirewall{} - err = c.client.Get(). - Namespace(c.ns). - Resource("egressfirewalls"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of EgressFirewalls that match those selectors. -func (c *egressFirewalls) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EgressFirewallList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EgressFirewallList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("egressfirewalls"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested egressFirewalls. -func (c *egressFirewalls) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("egressfirewalls"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a egressFirewall and creates it. Returns the server's representation of the egressFirewall, and an error, if there is any. -func (c *egressFirewalls) Create(ctx context.Context, egressFirewall *v1.EgressFirewall, opts metav1.CreateOptions) (result *v1.EgressFirewall, err error) { - result = &v1.EgressFirewall{} - err = c.client.Post(). - Namespace(c.ns). - Resource("egressfirewalls"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(egressFirewall). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a egressFirewall and updates it. Returns the server's representation of the egressFirewall, and an error, if there is any. -func (c *egressFirewalls) Update(ctx context.Context, egressFirewall *v1.EgressFirewall, opts metav1.UpdateOptions) (result *v1.EgressFirewall, err error) { - result = &v1.EgressFirewall{} - err = c.client.Put(). - Namespace(c.ns). - Resource("egressfirewalls"). - Name(egressFirewall.Name). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(egressFirewall). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *egressFirewalls) UpdateStatus(ctx context.Context, egressFirewall *v1.EgressFirewall, opts metav1.UpdateOptions) (result *v1.EgressFirewall, err error) { - result = &v1.EgressFirewall{} - err = c.client.Put(). - Namespace(c.ns). - Resource("egressfirewalls"). - Name(egressFirewall.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(egressFirewall). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the egressFirewall and deletes it. Returns an error if one occurs. -func (c *egressFirewalls) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("egressfirewalls"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *egressFirewalls) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("egressfirewalls"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched egressFirewall. -func (c *egressFirewalls) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EgressFirewall, err error) { - result = &v1.EgressFirewall{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("egressfirewalls"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied egressFirewall. -func (c *egressFirewalls) Apply(ctx context.Context, egressFirewall *egressfirewallv1.EgressFirewallApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EgressFirewall, err error) { - if egressFirewall == nil { - return nil, fmt.Errorf("egressFirewall provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(egressFirewall) - if err != nil { - return nil, err - } - name := egressFirewall.Name - if name == nil { - return nil, fmt.Errorf("egressFirewall.Name must be provided to Apply") - } - result = &v1.EgressFirewall{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("egressfirewalls"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
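All of this hand-rolled REST plumbing, including the ApplyStatus body whose removal continues below, now lives once in client-go's gentype package; call sites are untouched by the refactor. A sketch of the unchanged typed surface, where client is any value of the generated clientset.Interface and the K8sV1() accessor is assumed from the generated naming:

    import (
    	"context"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    	clientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned"
    )

    func getAndList(ctx context.Context, client clientset.Interface) error {
    	// The same calls compile against both the old hand-written client
    	// and the new gentype-backed one.
    	if _, err := client.K8sV1().EgressFirewalls("ns1").Get(ctx, "default", metav1.GetOptions{}); err != nil {
    		return err
    	}
    	_, err := client.K8sV1().EgressFirewalls("ns1").List(ctx, metav1.ListOptions{LabelSelector: "app=demo"})
    	return err
    }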
-func (c *egressFirewalls) ApplyStatus(ctx context.Context, egressFirewall *egressfirewallv1.EgressFirewallApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EgressFirewall, err error) { - if egressFirewall == nil { - return nil, fmt.Errorf("egressFirewall provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(egressFirewall) - if err != nil { - return nil, err - } - - name := egressFirewall.Name - if name == nil { - return nil, fmt.Errorf("egressFirewall.Name must be provided to Apply") - } - - result = &v1.EgressFirewall{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("egressfirewalls"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/typed/egressfirewall/v1/fake/fake_egressfirewall.go b/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/typed/egressfirewall/v1/fake/fake_egressfirewall.go index 2c47e30c4b..91b136e0af 100644 --- a/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/typed/egressfirewall/v1/fake/fake_egressfirewall.go +++ b/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/typed/egressfirewall/v1/fake/fake_egressfirewall.go @@ -43,22 +43,24 @@ var egressfirewallsKind = v1.SchemeGroupVersion.WithKind("EgressFirewall") // Get takes name of the egressFirewall, and returns the corresponding egressFirewall object, and an error if there is any. func (c *FakeEgressFirewalls) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EgressFirewall, err error) { + emptyResult := &v1.EgressFirewall{} obj, err := c.Fake. - Invokes(testing.NewGetAction(egressfirewallsResource, c.ns, name), &v1.EgressFirewall{}) + Invokes(testing.NewGetActionWithOptions(egressfirewallsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressFirewall), err } // List takes label and field selectors, and returns the list of EgressFirewalls that match those selectors. func (c *FakeEgressFirewalls) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EgressFirewallList, err error) { + emptyResult := &v1.EgressFirewallList{} obj, err := c.Fake. - Invokes(testing.NewListAction(egressfirewallsResource, egressfirewallsKind, c.ns, opts), &v1.EgressFirewallList{}) + Invokes(testing.NewListActionWithOptions(egressfirewallsResource, egressfirewallsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -77,40 +79,43 @@ func (c *FakeEgressFirewalls) List(ctx context.Context, opts metav1.ListOptions) // Watch returns a watch.Interface that watches the requested egressFirewalls. func (c *FakeEgressFirewalls) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(egressfirewallsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(egressfirewallsResource, c.ns, opts)) } // Create takes the representation of a egressFirewall and creates it. Returns the server's representation of the egressFirewall, and an error, if there is any. 
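One subtle behavior change threads through Create below and every other fake verb in this file: the ...WithOptions action constructors forward the caller's options into the reaction chain, and when that chain yields a nil object the methods now return a typed empty value instead of a nil pointer. A sketch of the second point, using the reactor API from k8s.io/client-go/testing (the K8sV1() accessor is again assumed from the generated naming):

    package fake_test

    import (
    	"context"
    	"testing"

    	apierrors "k8s.io/apimachinery/pkg/api/errors"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/runtime"
    	clienttesting "k8s.io/client-go/testing"

    	v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1"
    	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/fake"
    )

    func TestNotFoundReturnsEmptyObject(t *testing.T) {
    	cs := fake.NewSimpleClientset()
    	cs.PrependReactor("get", "egressfirewalls", func(clienttesting.Action) (bool, runtime.Object, error) {
    		return true, nil, apierrors.NewNotFound(v1.Resource("egressfirewall"), "missing")
    	})

    	ef, err := cs.K8sV1().EgressFirewalls("ns1").Get(context.Background(), "missing", metav1.GetOptions{})
    	if !apierrors.IsNotFound(err) {
    		t.Fatalf("expected NotFound, got %v", err)
    	}
    	if ef == nil { // previously nil; now a typed empty object
    		t.Fatal("expected non-nil empty result")
    	}
    }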
func (c *FakeEgressFirewalls) Create(ctx context.Context, egressFirewall *v1.EgressFirewall, opts metav1.CreateOptions) (result *v1.EgressFirewall, err error) { + emptyResult := &v1.EgressFirewall{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(egressfirewallsResource, c.ns, egressFirewall), &v1.EgressFirewall{}) + Invokes(testing.NewCreateActionWithOptions(egressfirewallsResource, c.ns, egressFirewall, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressFirewall), err } // Update takes the representation of a egressFirewall and updates it. Returns the server's representation of the egressFirewall, and an error, if there is any. func (c *FakeEgressFirewalls) Update(ctx context.Context, egressFirewall *v1.EgressFirewall, opts metav1.UpdateOptions) (result *v1.EgressFirewall, err error) { + emptyResult := &v1.EgressFirewall{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(egressfirewallsResource, c.ns, egressFirewall), &v1.EgressFirewall{}) + Invokes(testing.NewUpdateActionWithOptions(egressfirewallsResource, c.ns, egressFirewall, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressFirewall), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeEgressFirewalls) UpdateStatus(ctx context.Context, egressFirewall *v1.EgressFirewall, opts metav1.UpdateOptions) (*v1.EgressFirewall, error) { +func (c *FakeEgressFirewalls) UpdateStatus(ctx context.Context, egressFirewall *v1.EgressFirewall, opts metav1.UpdateOptions) (result *v1.EgressFirewall, err error) { + emptyResult := &v1.EgressFirewall{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(egressfirewallsResource, "status", c.ns, egressFirewall), &v1.EgressFirewall{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(egressfirewallsResource, "status", c.ns, egressFirewall, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressFirewall), err } @@ -125,7 +130,7 @@ func (c *FakeEgressFirewalls) Delete(ctx context.Context, name string, opts meta // DeleteCollection deletes a collection of objects. func (c *FakeEgressFirewalls) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(egressfirewallsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(egressfirewallsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.EgressFirewallList{}) return err @@ -133,11 +138,12 @@ func (c *FakeEgressFirewalls) DeleteCollection(ctx context.Context, opts metav1. // Patch applies the patch and returns the patched egressFirewall. func (c *FakeEgressFirewalls) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EgressFirewall, err error) { + emptyResult := &v1.EgressFirewall{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(egressfirewallsResource, c.ns, name, pt, data, subresources...), &v1.EgressFirewall{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(egressfirewallsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressFirewall), err } @@ -155,11 +161,12 @@ func (c *FakeEgressFirewalls) Apply(ctx context.Context, egressFirewall *egressf if name == nil { return nil, fmt.Errorf("egressFirewall.Name must be provided to Apply") } + emptyResult := &v1.EgressFirewall{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(egressfirewallsResource, c.ns, *name, types.ApplyPatchType, data), &v1.EgressFirewall{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(egressfirewallsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressFirewall), err } @@ -178,11 +185,12 @@ func (c *FakeEgressFirewalls) ApplyStatus(ctx context.Context, egressFirewall *e if name == nil { return nil, fmt.Errorf("egressFirewall.Name must be provided to Apply") } + emptyResult := &v1.EgressFirewall{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(egressfirewallsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.EgressFirewall{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(egressfirewallsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressFirewall), err } diff --git a/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go b/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go index d0c0ac6345..8137c202a1 100644 --- a/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go +++ b/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go @@ -227,6 +227,7 @@ type SharedInformerFactory interface { // Start initializes all requested informers. They are handled in goroutines // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. Start(stopCh <-chan struct{}) // Shutdown marks a factory as shutting down. At that point no new diff --git a/go-controller/pkg/crd/egressfirewall/v1/apis/listers/egressfirewall/v1/egressfirewall.go b/go-controller/pkg/crd/egressfirewall/v1/apis/listers/egressfirewall/v1/egressfirewall.go index dc76f8e024..db7ea12465 100644 --- a/go-controller/pkg/crd/egressfirewall/v1/apis/listers/egressfirewall/v1/egressfirewall.go +++ b/go-controller/pkg/crd/egressfirewall/v1/apis/listers/egressfirewall/v1/egressfirewall.go @@ -19,8 +19,8 @@ package v1 import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -37,25 +37,17 @@ type EgressFirewallLister interface { // egressFirewallLister implements the EgressFirewallLister interface. type egressFirewallLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.EgressFirewall] } // NewEgressFirewallLister returns a new EgressFirewallLister. 
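The hand-written List and Get bodies give way to client-go's generic listers; the constructor just below simply wraps the indexer in listers.New, preserving behavior, including NotFound errors qualified with the egressfirewall group resource. Unchanged caller usage, sketched against an ad-hoc indexer:

    import (
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/labels"
    	"k8s.io/client-go/tools/cache"

    	v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1"
    )

    func demoLister() {
    	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
    	_ = indexer.Add(&v1.EgressFirewall{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "ns1"}})

    	lister := NewEgressFirewallLister(indexer)
    	all, _ := lister.List(labels.Everything())                           // across all namespaces
    	scoped, _ := lister.EgressFirewalls("ns1").List(labels.Everything()) // one namespace view
    	fmt.Println(len(all), len(scoped))
    }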
func NewEgressFirewallLister(indexer cache.Indexer) EgressFirewallLister { - return &egressFirewallLister{indexer: indexer} -} - -// List lists all EgressFirewalls in the indexer. -func (s *egressFirewallLister) List(selector labels.Selector) (ret []*v1.EgressFirewall, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.EgressFirewall)) - }) - return ret, err + return &egressFirewallLister{listers.New[*v1.EgressFirewall](indexer, v1.Resource("egressfirewall"))} } // EgressFirewalls returns an object that can list and get EgressFirewalls. func (s *egressFirewallLister) EgressFirewalls(namespace string) EgressFirewallNamespaceLister { - return egressFirewallNamespaceLister{indexer: s.indexer, namespace: namespace} + return egressFirewallNamespaceLister{listers.NewNamespaced[*v1.EgressFirewall](s.ResourceIndexer, namespace)} } // EgressFirewallNamespaceLister helps list and get EgressFirewalls. @@ -73,26 +65,5 @@ type EgressFirewallNamespaceLister interface { // egressFirewallNamespaceLister implements the EgressFirewallNamespaceLister // interface. type egressFirewallNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all EgressFirewalls in the indexer for a given namespace. -func (s egressFirewallNamespaceLister) List(selector labels.Selector) (ret []*v1.EgressFirewall, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.EgressFirewall)) - }) - return ret, err -} - -// Get retrieves the EgressFirewall from the indexer for a given namespace and name. -func (s egressFirewallNamespaceLister) Get(name string) (*v1.EgressFirewall, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("egressfirewall"), name) - } - return obj.(*v1.EgressFirewall), nil + listers.ResourceIndexer[*v1.EgressFirewall] } diff --git a/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/egressip/v1/egressip.go b/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/egressip/v1/egressip.go index 9533079ad4..3eba818bde 100644 --- a/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/egressip/v1/egressip.go +++ b/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/egressip/v1/egressip.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EgressIPApplyConfiguration represents an declarative configuration of the EgressIP type for use +// EgressIPApplyConfiguration represents a declarative configuration of the EgressIP type for use // with apply. type EgressIPApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -32,7 +32,7 @@ type EgressIPApplyConfiguration struct { Status *EgressIPStatusApplyConfiguration `json:"status,omitempty"` } -// EgressIP constructs an declarative configuration of the EgressIP type for use with +// EgressIP constructs a declarative configuration of the EgressIP type for use with // apply. func EgressIP(name string) *EgressIPApplyConfiguration { b := &EgressIPApplyConfiguration{} @@ -215,3 +215,9 @@ func (b *EgressIPApplyConfiguration) WithStatus(value *EgressIPStatusApplyConfig b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
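GetName (implementation just below) gives generic machinery, such as the gentype apply path, a uniform way to read the intended object name off an apply configuration; it returns nil until a name has been set. A tiny sketch:

    import "fmt"

    func demoGetName() {
    	cfg := EgressIP("egress-a") // cluster-scoped constructor shown earlier in this file
    	if name := cfg.GetName(); name != nil {
    		fmt.Println(*name) // egress-a
    	}
    }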
+func (b *EgressIPApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/egressip/v1/egressipspec.go b/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/egressip/v1/egressipspec.go index 5f578206d3..386b2d4807 100644 --- a/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/egressip/v1/egressipspec.go +++ b/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/egressip/v1/egressipspec.go @@ -18,18 +18,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EgressIPSpecApplyConfiguration represents an declarative configuration of the EgressIPSpec type for use +// EgressIPSpecApplyConfiguration represents a declarative configuration of the EgressIPSpec type for use // with apply. type EgressIPSpecApplyConfiguration struct { - EgressIPs []string `json:"egressIPs,omitempty"` - NamespaceSelector *v1.LabelSelector `json:"namespaceSelector,omitempty"` - PodSelector *v1.LabelSelector `json:"podSelector,omitempty"` + EgressIPs []string `json:"egressIPs,omitempty"` + NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` } -// EgressIPSpecApplyConfiguration constructs an declarative configuration of the EgressIPSpec type for use with +// EgressIPSpecApplyConfiguration constructs a declarative configuration of the EgressIPSpec type for use with // apply. func EgressIPSpec() *EgressIPSpecApplyConfiguration { return &EgressIPSpecApplyConfiguration{} @@ -48,15 +48,15 @@ func (b *EgressIPSpecApplyConfiguration) WithEgressIPs(values ...string) *Egress // WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NamespaceSelector field is set to the value of the last call. -func (b *EgressIPSpecApplyConfiguration) WithNamespaceSelector(value v1.LabelSelector) *EgressIPSpecApplyConfiguration { - b.NamespaceSelector = &value +func (b *EgressIPSpecApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *EgressIPSpecApplyConfiguration { + b.NamespaceSelector = value return b } // WithPodSelector sets the PodSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PodSelector field is set to the value of the last call. -func (b *EgressIPSpecApplyConfiguration) WithPodSelector(value v1.LabelSelector) *EgressIPSpecApplyConfiguration { - b.PodSelector = &value +func (b *EgressIPSpecApplyConfiguration) WithPodSelector(value *v1.LabelSelectorApplyConfiguration) *EgressIPSpecApplyConfiguration { + b.PodSelector = value return b } diff --git a/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/egressip/v1/egressipstatus.go b/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/egressip/v1/egressipstatus.go index ff4daa6377..3a08bc1549 100644 --- a/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/egressip/v1/egressipstatus.go +++ b/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/egressip/v1/egressipstatus.go @@ -17,13 +17,13 @@ limitations under the License. 
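Because the spec's selector fields (changed above) now hold apply configurations rather than concrete metav1.LabelSelector values, selectors are built fluently with client-go's meta/v1 apply helpers; a sketch:

    import metav1apply "k8s.io/client-go/applyconfigurations/meta/v1"

    func demoSpec() *EgressIPSpecApplyConfiguration {
    	return EgressIPSpec().
    		WithEgressIPs("10.0.0.10").
    		WithNamespaceSelector(metav1apply.LabelSelector().
    			WithMatchLabels(map[string]string{"team": "network"}))
    }

Only the fields actually set end up in the apply request, which is what lets the server attribute ownership of each field to a manager.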
package v1 -// EgressIPStatusApplyConfiguration represents an declarative configuration of the EgressIPStatus type for use +// EgressIPStatusApplyConfiguration represents a declarative configuration of the EgressIPStatus type for use // with apply. type EgressIPStatusApplyConfiguration struct { Items []EgressIPStatusItemApplyConfiguration `json:"items,omitempty"` } -// EgressIPStatusApplyConfiguration constructs an declarative configuration of the EgressIPStatus type for use with +// EgressIPStatusApplyConfiguration constructs a declarative configuration of the EgressIPStatus type for use with // apply. func EgressIPStatus() *EgressIPStatusApplyConfiguration { return &EgressIPStatusApplyConfiguration{} diff --git a/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/egressip/v1/egressipstatusitem.go b/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/egressip/v1/egressipstatusitem.go index 5f7eef5b24..dcff04177a 100644 --- a/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/egressip/v1/egressipstatusitem.go +++ b/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/egressip/v1/egressipstatusitem.go @@ -17,14 +17,14 @@ limitations under the License. package v1 -// EgressIPStatusItemApplyConfiguration represents an declarative configuration of the EgressIPStatusItem type for use +// EgressIPStatusItemApplyConfiguration represents a declarative configuration of the EgressIPStatusItem type for use // with apply. type EgressIPStatusItemApplyConfiguration struct { Node *string `json:"node,omitempty"` EgressIP *string `json:"egressIP,omitempty"` } -// EgressIPStatusItemApplyConfiguration constructs an declarative configuration of the EgressIPStatusItem type for use with +// EgressIPStatusItemApplyConfiguration constructs a declarative configuration of the EgressIPStatusItem type for use with // apply. 
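Status apply configurations compose the same way; the EgressIPStatusItem constructor just below plugs into the status builder. A sketch, assuming the generated WithItems variadic follows the same pattern as the WithConditions change shown later in this patch:

    func demoStatus() *EgressIPStatusApplyConfiguration {
    	return EgressIPStatus().WithItems(
    		EgressIPStatusItem().
    			WithNode("node-a").
    			WithEgressIP("10.0.0.10"),
    	)
    }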
func EgressIPStatusItem() *EgressIPStatusItemApplyConfiguration { return &EgressIPStatusItemApplyConfiguration{} diff --git a/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/utils.go b/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/utils.go index 24121ee5d8..b3b292bee9 100644 --- a/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/utils.go +++ b/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/utils.go @@ -20,7 +20,10 @@ package applyconfiguration import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1" egressipv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/egressip/v1" + internal "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/internal" + runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" ) // ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no @@ -40,3 +43,7 @@ func ForKind(kind schema.GroupVersionKind) interface{} { } return nil } + +func NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter { + return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()} +} diff --git a/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/fake/clientset_generated.go b/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/fake/clientset_generated.go index 1da259e93d..d1a2d2cc01 100644 --- a/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/fake/clientset_generated.go +++ b/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/fake/clientset_generated.go @@ -18,6 +18,7 @@ limitations under the License. package fake import ( + applyconfiguration "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration" clientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned" k8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/typed/egressip/v1" fakek8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/typed/egressip/v1/fake" @@ -30,8 +31,12 @@ import ( // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { @@ -73,6 +78,38 @@ func (c *Clientset) Tracker() testing.ObjectTracker { return c.tracker } +// NewClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. 
It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewClientset(objects ...runtime.Object) *Clientset { + o := testing.NewFieldManagedObjectTracker( + scheme, + codecs.UniversalDecoder(), + applyconfiguration.NewTypeConverter(scheme), + ) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + var ( _ clientset.Interface = &Clientset{} _ testing.FakeClient = &Clientset{} diff --git a/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/typed/egressip/v1/egressip.go b/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/typed/egressip/v1/egressip.go index 0bdd0e9933..3a0f5f2518 100644 --- a/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/typed/egressip/v1/egressip.go +++ b/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/typed/egressip/v1/egressip.go @@ -19,9 +19,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1" egressipv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/applyconfiguration/egressip/v1" @@ -29,7 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // EgressIPsGetter has a method to return a EgressIPInterface. @@ -54,143 +51,18 @@ type EgressIPInterface interface { // egressIPs implements EgressIPInterface type egressIPs struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.EgressIP, *v1.EgressIPList, *egressipv1.EgressIPApplyConfiguration] } // newEgressIPs returns a EgressIPs func newEgressIPs(c *K8sV1Client) *egressIPs { return &egressIPs{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.EgressIP, *v1.EgressIPList, *egressipv1.EgressIPApplyConfiguration]( + "egressips", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.EgressIP { return &v1.EgressIP{} }, + func() *v1.EgressIPList { return &v1.EgressIPList{} }), } } - -// Get takes name of the egressIP, and returns the corresponding egressIP object, and an error if there is any. -func (c *egressIPs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EgressIP, err error) { - result = &v1.EgressIP{} - err = c.client.Get(). - Resource("egressips"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of EgressIPs that match those selectors. -func (c *egressIPs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EgressIPList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EgressIPList{} - err = c.client.Get(). - Resource("egressips"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested egressIPs. -func (c *egressIPs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("egressips"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a egressIP and creates it. Returns the server's representation of the egressIP, and an error, if there is any. -func (c *egressIPs) Create(ctx context.Context, egressIP *v1.EgressIP, opts metav1.CreateOptions) (result *v1.EgressIP, err error) { - result = &v1.EgressIP{} - err = c.client.Post(). - Resource("egressips"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(egressIP). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a egressIP and updates it. Returns the server's representation of the egressIP, and an error, if there is any. -func (c *egressIPs) Update(ctx context.Context, egressIP *v1.EgressIP, opts metav1.UpdateOptions) (result *v1.EgressIP, err error) { - result = &v1.EgressIP{} - err = c.client.Put(). - Resource("egressips"). - Name(egressIP.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(egressIP). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the egressIP and deletes it. Returns an error if one occurs. -func (c *egressIPs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("egressips"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *egressIPs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("egressips"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched egressIP. -func (c *egressIPs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EgressIP, err error) { - result = &v1.EgressIP{} - err = c.client.Patch(pt). - Resource("egressips"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied egressIP. -func (c *egressIPs) Apply(ctx context.Context, egressIP *egressipv1.EgressIPApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EgressIP, err error) { - if egressIP == nil { - return nil, fmt.Errorf("egressIP provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(egressIP) - if err != nil { - return nil, err - } - name := egressIP.Name - if name == nil { - return nil, fmt.Errorf("egressIP.Name must be provided to Apply") - } - result = &v1.EgressIP{} - err = c.client.Patch(types.ApplyPatchType). - Resource("egressips"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/typed/egressip/v1/fake/fake_egressip.go b/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/typed/egressip/v1/fake/fake_egressip.go index 9f6c6dc7f0..81b3a27372 100644 --- a/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/typed/egressip/v1/fake/fake_egressip.go +++ b/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/typed/egressip/v1/fake/fake_egressip.go @@ -42,20 +42,22 @@ var egressipsKind = v1.SchemeGroupVersion.WithKind("EgressIP") // Get takes name of the egressIP, and returns the corresponding egressIP object, and an error if there is any. func (c *FakeEgressIPs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EgressIP, err error) { + emptyResult := &v1.EgressIP{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(egressipsResource, name), &v1.EgressIP{}) + Invokes(testing.NewRootGetActionWithOptions(egressipsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressIP), err } // List takes label and field selectors, and returns the list of EgressIPs that match those selectors. func (c *FakeEgressIPs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EgressIPList, err error) { + emptyResult := &v1.EgressIPList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(egressipsResource, egressipsKind, opts), &v1.EgressIPList{}) + Invokes(testing.NewRootListActionWithOptions(egressipsResource, egressipsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -74,25 +76,27 @@ func (c *FakeEgressIPs) List(ctx context.Context, opts metav1.ListOptions) (resu // Watch returns a watch.Interface that watches the requested egressIPs. func (c *FakeEgressIPs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(egressipsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(egressipsResource, opts)) } // Create takes the representation of a egressIP and creates it. Returns the server's representation of the egressIP, and an error, if there is any. func (c *FakeEgressIPs) Create(ctx context.Context, egressIP *v1.EgressIP, opts metav1.CreateOptions) (result *v1.EgressIP, err error) { + emptyResult := &v1.EgressIP{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(egressipsResource, egressIP), &v1.EgressIP{}) + Invokes(testing.NewRootCreateActionWithOptions(egressipsResource, egressIP, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressIP), err } // Update takes the representation of a egressIP and updates it. Returns the server's representation of the egressIP, and an error, if there is any. func (c *FakeEgressIPs) Update(ctx context.Context, egressIP *v1.EgressIP, opts metav1.UpdateOptions) (result *v1.EgressIP, err error) { + emptyResult := &v1.EgressIP{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(egressipsResource, egressIP), &v1.EgressIP{}) + Invokes(testing.NewRootUpdateActionWithOptions(egressipsResource, egressIP, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressIP), err } @@ -106,7 +110,7 @@ func (c *FakeEgressIPs) Delete(ctx context.Context, name string, opts metav1.Del // DeleteCollection deletes a collection of objects. 
func (c *FakeEgressIPs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(egressipsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(egressipsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.EgressIPList{}) return err @@ -114,10 +118,11 @@ func (c *FakeEgressIPs) DeleteCollection(ctx context.Context, opts metav1.Delete // Patch applies the patch and returns the patched egressIP. func (c *FakeEgressIPs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EgressIP, err error) { + emptyResult := &v1.EgressIP{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(egressipsResource, name, pt, data, subresources...), &v1.EgressIP{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(egressipsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressIP), err } @@ -135,10 +140,11 @@ func (c *FakeEgressIPs) Apply(ctx context.Context, egressIP *egressipv1.EgressIP if name == nil { return nil, fmt.Errorf("egressIP.Name must be provided to Apply") } + emptyResult := &v1.EgressIP{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(egressipsResource, *name, types.ApplyPatchType, data), &v1.EgressIP{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(egressipsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressIP), err } diff --git a/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go b/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go index 8104401bff..1d30b21240 100644 --- a/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go +++ b/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go @@ -227,6 +227,7 @@ type SharedInformerFactory interface { // Start initializes all requested informers. They are handled in goroutines // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. Start(stopCh <-chan struct{}) // Shutdown marks a factory as shutting down. At that point no new diff --git a/go-controller/pkg/crd/egressip/v1/apis/listers/egressip/v1/egressip.go b/go-controller/pkg/crd/egressip/v1/apis/listers/egressip/v1/egressip.go index bdfcc689a0..eca1b7e575 100644 --- a/go-controller/pkg/crd/egressip/v1/apis/listers/egressip/v1/egressip.go +++ b/go-controller/pkg/crd/egressip/v1/apis/listers/egressip/v1/egressip.go @@ -19,8 +19,8 @@ package v1 import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,30 +38,10 @@ type EgressIPLister interface { // egressIPLister implements the EgressIPLister interface. type egressIPLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.EgressIP] } // NewEgressIPLister returns a new EgressIPLister. func NewEgressIPLister(indexer cache.Indexer) EgressIPLister { - return &egressIPLister{indexer: indexer} -} - -// List lists all EgressIPs in the indexer. 
-func (s *egressIPLister) List(selector labels.Selector) (ret []*v1.EgressIP, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.EgressIP)) - }) - return ret, err -} - -// Get retrieves the EgressIP from the index for a given name. -func (s *egressIPLister) Get(name string) (*v1.EgressIP, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("egressip"), name) - } - return obj.(*v1.EgressIP), nil + return &egressIPLister{listers.New[*v1.EgressIP](indexer, v1.Resource("egressip"))} } diff --git a/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/egressqos/v1/egressqos.go b/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/egressqos/v1/egressqos.go index b7b6ea4f96..a2b3e2bfde 100644 --- a/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/egressqos/v1/egressqos.go +++ b/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/egressqos/v1/egressqos.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EgressQoSApplyConfiguration represents an declarative configuration of the EgressQoS type for use +// EgressQoSApplyConfiguration represents a declarative configuration of the EgressQoS type for use // with apply. type EgressQoSApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -32,7 +32,7 @@ type EgressQoSApplyConfiguration struct { Status *EgressQoSStatusApplyConfiguration `json:"status,omitempty"` } -// EgressQoS constructs an declarative configuration of the EgressQoS type for use with +// EgressQoS constructs a declarative configuration of the EgressQoS type for use with // apply. func EgressQoS(name, namespace string) *EgressQoSApplyConfiguration { b := &EgressQoSApplyConfiguration{} @@ -216,3 +216,9 @@ func (b *EgressQoSApplyConfiguration) WithStatus(value *EgressQoSStatusApplyConf b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EgressQoSApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/egressqos/v1/egressqosrule.go b/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/egressqos/v1/egressqosrule.go index 2a51bf5f11..e2e337c6e6 100644 --- a/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/egressqos/v1/egressqosrule.go +++ b/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/egressqos/v1/egressqosrule.go @@ -18,18 +18,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EgressQoSRuleApplyConfiguration represents an declarative configuration of the EgressQoSRule type for use +// EgressQoSRuleApplyConfiguration represents a declarative configuration of the EgressQoSRule type for use // with apply. 
type EgressQoSRuleApplyConfiguration struct { - DSCP *int `json:"dscp,omitempty"` - DstCIDR *string `json:"dstCIDR,omitempty"` - PodSelector *v1.LabelSelector `json:"podSelector,omitempty"` + DSCP *int `json:"dscp,omitempty"` + DstCIDR *string `json:"dstCIDR,omitempty"` + PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` } -// EgressQoSRuleApplyConfiguration constructs an declarative configuration of the EgressQoSRule type for use with +// EgressQoSRuleApplyConfiguration constructs a declarative configuration of the EgressQoSRule type for use with // apply. func EgressQoSRule() *EgressQoSRuleApplyConfiguration { return &EgressQoSRuleApplyConfiguration{} @@ -54,7 +54,7 @@ func (b *EgressQoSRuleApplyConfiguration) WithDstCIDR(value string) *EgressQoSRu // WithPodSelector sets the PodSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PodSelector field is set to the value of the last call. -func (b *EgressQoSRuleApplyConfiguration) WithPodSelector(value v1.LabelSelector) *EgressQoSRuleApplyConfiguration { - b.PodSelector = &value +func (b *EgressQoSRuleApplyConfiguration) WithPodSelector(value *v1.LabelSelectorApplyConfiguration) *EgressQoSRuleApplyConfiguration { + b.PodSelector = value return b } diff --git a/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/egressqos/v1/egressqosspec.go b/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/egressqos/v1/egressqosspec.go index 57d819bffc..55b0771448 100644 --- a/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/egressqos/v1/egressqosspec.go +++ b/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/egressqos/v1/egressqosspec.go @@ -17,13 +17,13 @@ limitations under the License. package v1 -// EgressQoSSpecApplyConfiguration represents an declarative configuration of the EgressQoSSpec type for use +// EgressQoSSpecApplyConfiguration represents a declarative configuration of the EgressQoSSpec type for use // with apply. type EgressQoSSpecApplyConfiguration struct { Egress []EgressQoSRuleApplyConfiguration `json:"egress,omitempty"` } -// EgressQoSSpecApplyConfiguration constructs an declarative configuration of the EgressQoSSpec type for use with +// EgressQoSSpecApplyConfiguration constructs a declarative configuration of the EgressQoSSpec type for use with // apply. func EgressQoSSpec() *EgressQoSSpecApplyConfiguration { return &EgressQoSSpecApplyConfiguration{} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/egressqos/v1/egressqosstatus.go b/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/egressqos/v1/egressqosstatus.go index a96b0b3064..291b698936 100644 --- a/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/egressqos/v1/egressqosstatus.go +++ b/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/egressqos/v1/egressqosstatus.go @@ -18,17 +18,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EgressQoSStatusApplyConfiguration represents an declarative configuration of the EgressQoSStatus type for use +// EgressQoSStatusApplyConfiguration represents a declarative configuration of the EgressQoSStatus type for use // with apply. 
type EgressQoSStatusApplyConfiguration struct { - Status *string `json:"status,omitempty"` - Conditions []v1.Condition `json:"conditions,omitempty"` + Status *string `json:"status,omitempty"` + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// EgressQoSStatusApplyConfiguration constructs an declarative configuration of the EgressQoSStatus type for use with +// EgressQoSStatusApplyConfiguration constructs a declarative configuration of the EgressQoSStatus type for use with // apply. func EgressQoSStatus() *EgressQoSStatusApplyConfiguration { return &EgressQoSStatusApplyConfiguration{} @@ -45,9 +45,12 @@ func (b *EgressQoSStatusApplyConfiguration) WithStatus(value string) *EgressQoSS // WithConditions adds the given value to the Conditions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Conditions field. -func (b *EgressQoSStatusApplyConfiguration) WithConditions(values ...v1.Condition) *EgressQoSStatusApplyConfiguration { +func (b *EgressQoSStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *EgressQoSStatusApplyConfiguration { for i := range values { - b.Conditions = append(b.Conditions, values[i]) + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) } return b } diff --git a/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/utils.go b/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/utils.go index d2b0792074..f39ebabf56 100644 --- a/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/utils.go +++ b/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/utils.go @@ -20,7 +20,10 @@ package applyconfiguration import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" egressqosv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/egressqos/v1" + internal "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/internal" + runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" ) // ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no @@ -40,3 +43,7 @@ func ForKind(kind schema.GroupVersionKind) interface{} { } return nil } + +func NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter { + return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()} +} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake/clientset_generated.go b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake/clientset_generated.go index 5ba73126ce..3e044a34f8 100644 --- a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake/clientset_generated.go +++ b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake/clientset_generated.go @@ -18,6 +18,7 @@ limitations under the License. 
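Two pieces just above deserve a note. First, EgressQoSStatus conditions are now built from client-go's metav1 Condition apply configurations, and WithConditions panics on a nil entry, so construct the values inline. Second, NewTypeConverter in utils.go hands the generated internal parser to client-go's testing TypeConverter; the field-managed tracker in the fake clientset whose diff follows depends on it. A sketch of the first point:

    import (
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	metav1apply "k8s.io/client-go/applyconfigurations/meta/v1"
    )

    func demoConditions() *EgressQoSStatusApplyConfiguration {
    	return EgressQoSStatus().WithConditions(
    		metav1apply.Condition().
    			WithType("Ready").
    			WithStatus(metav1.ConditionTrue).
    			WithReason("RulesProgrammed"),
    	)
    }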
package fake import ( + applyconfiguration "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration" clientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned" k8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1" fakek8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/fake" @@ -30,8 +31,12 @@ import ( // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { @@ -73,6 +78,38 @@ func (c *Clientset) Tracker() testing.ObjectTracker { return c.tracker } +// NewClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. 
+func NewClientset(objects ...runtime.Object) *Clientset { + o := testing.NewFieldManagedObjectTracker( + scheme, + codecs.UniversalDecoder(), + applyconfiguration.NewTypeConverter(scheme), + ) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + var ( _ clientset.Interface = &Clientset{} _ testing.FakeClient = &Clientset{} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/egressqos.go b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/egressqos.go index 4bba09ce25..b2465d4aca 100644 --- a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/egressqos.go +++ b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/egressqos.go @@ -19,9 +19,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" egressqosv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/applyconfiguration/egressqos/v1" @@ -29,7 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // EgressQoSesGetter has a method to return a EgressQoSInterface. @@ -42,6 +39,7 @@ type EgressQoSesGetter interface { type EgressQoSInterface interface { Create(ctx context.Context, egressQoS *v1.EgressQoS, opts metav1.CreateOptions) (*v1.EgressQoS, error) Update(ctx context.Context, egressQoS *v1.EgressQoS, opts metav1.UpdateOptions) (*v1.EgressQoS, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, egressQoS *v1.EgressQoS, opts metav1.UpdateOptions) (*v1.EgressQoS, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -50,206 +48,25 @@ type EgressQoSInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EgressQoS, err error) Apply(ctx context.Context, egressQoS *egressqosv1.EgressQoSApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EgressQoS, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, egressQoS *egressqosv1.EgressQoSApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EgressQoS, err error) EgressQoSExpansion } // egressQoSes implements EgressQoSInterface type egressQoSes struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.EgressQoS, *v1.EgressQoSList, *egressqosv1.EgressQoSApplyConfiguration] } // newEgressQoSes returns a EgressQoSes func newEgressQoSes(c *K8sV1Client, namespace string) *egressQoSes { return &egressQoSes{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.EgressQoS, *v1.EgressQoSList, *egressqosv1.EgressQoSApplyConfiguration]( + "egressqoses", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.EgressQoS { return &v1.EgressQoS{} }, + func() *v1.EgressQoSList { return &v1.EgressQoSList{} }), } } - -// Get takes name of the egressQoS, and returns the corresponding egressQoS object, and an error if there is any. -func (c *egressQoSes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EgressQoS, err error) { - result = &v1.EgressQoS{} - err = c.client.Get(). - Namespace(c.ns). - Resource("egressqoses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of EgressQoSes that match those selectors. -func (c *egressQoSes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EgressQoSList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EgressQoSList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("egressqoses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested egressQoSes. -func (c *egressQoSes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("egressqoses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a egressQoS and creates it. Returns the server's representation of the egressQoS, and an error, if there is any. -func (c *egressQoSes) Create(ctx context.Context, egressQoS *v1.EgressQoS, opts metav1.CreateOptions) (result *v1.EgressQoS, err error) { - result = &v1.EgressQoS{} - err = c.client.Post(). - Namespace(c.ns). - Resource("egressqoses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(egressQoS). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a egressQoS and updates it. Returns the server's representation of the egressQoS, and an error, if there is any. -func (c *egressQoSes) Update(ctx context.Context, egressQoS *v1.EgressQoS, opts metav1.UpdateOptions) (result *v1.EgressQoS, err error) { - result = &v1.EgressQoS{} - err = c.client.Put(). - Namespace(c.ns). - Resource("egressqoses"). - Name(egressQoS.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(egressQoS). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *egressQoSes) UpdateStatus(ctx context.Context, egressQoS *v1.EgressQoS, opts metav1.UpdateOptions) (result *v1.EgressQoS, err error) { - result = &v1.EgressQoS{} - err = c.client.Put(). - Namespace(c.ns). - Resource("egressqoses"). - Name(egressQoS.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(egressQoS). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the egressQoS and deletes it. Returns an error if one occurs. -func (c *egressQoSes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("egressqoses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *egressQoSes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("egressqoses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched egressQoS. -func (c *egressQoSes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EgressQoS, err error) { - result = &v1.EgressQoS{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("egressqoses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied egressQoS. -func (c *egressQoSes) Apply(ctx context.Context, egressQoS *egressqosv1.EgressQoSApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EgressQoS, err error) { - if egressQoS == nil { - return nil, fmt.Errorf("egressQoS provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(egressQoS) - if err != nil { - return nil, err - } - name := egressQoS.Name - if name == nil { - return nil, fmt.Errorf("egressQoS.Name must be provided to Apply") - } - result = &v1.EgressQoS{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("egressqoses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *egressQoSes) ApplyStatus(ctx context.Context, egressQoS *egressqosv1.EgressQoSApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EgressQoS, err error) { - if egressQoS == nil { - return nil, fmt.Errorf("egressQoS provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(egressQoS) - if err != nil { - return nil, err - } - - name := egressQoS.Name - if name == nil { - return nil, fmt.Errorf("egressQoS.Name must be provided to Apply") - } - - result = &v1.EgressQoS{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("egressqoses"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/fake/fake_egressqos.go b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/fake/fake_egressqos.go index 10aaa690c1..8e872a62ea 100644 --- a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/fake/fake_egressqos.go +++ b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/fake/fake_egressqos.go @@ -43,22 +43,24 @@ var egressqosesKind = v1.SchemeGroupVersion.WithKind("EgressQoS") // Get takes name of the egressQoS, and returns the corresponding egressQoS object, and an error if there is any. func (c *FakeEgressQoSes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EgressQoS, err error) { + emptyResult := &v1.EgressQoS{} obj, err := c.Fake. - Invokes(testing.NewGetAction(egressqosesResource, c.ns, name), &v1.EgressQoS{}) + Invokes(testing.NewGetActionWithOptions(egressqosesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressQoS), err } // List takes label and field selectors, and returns the list of EgressQoSes that match those selectors. func (c *FakeEgressQoSes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EgressQoSList, err error) { + emptyResult := &v1.EgressQoSList{} obj, err := c.Fake. - Invokes(testing.NewListAction(egressqosesResource, egressqosesKind, c.ns, opts), &v1.EgressQoSList{}) + Invokes(testing.NewListActionWithOptions(egressqosesResource, egressqosesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -77,40 +79,43 @@ func (c *FakeEgressQoSes) List(ctx context.Context, opts metav1.ListOptions) (re // Watch returns a watch.Interface that watches the requested egressQoSes. func (c *FakeEgressQoSes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(egressqosesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(egressqosesResource, c.ns, opts)) } // Create takes the representation of a egressQoS and creates it. Returns the server's representation of the egressQoS, and an error, if there is any. func (c *FakeEgressQoSes) Create(ctx context.Context, egressQoS *v1.EgressQoS, opts metav1.CreateOptions) (result *v1.EgressQoS, err error) { + emptyResult := &v1.EgressQoS{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(egressqosesResource, c.ns, egressQoS), &v1.EgressQoS{}) + Invokes(testing.NewCreateActionWithOptions(egressqosesResource, c.ns, egressQoS, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressQoS), err } // Update takes the representation of a egressQoS and updates it. Returns the server's representation of the egressQoS, and an error, if there is any. func (c *FakeEgressQoSes) Update(ctx context.Context, egressQoS *v1.EgressQoS, opts metav1.UpdateOptions) (result *v1.EgressQoS, err error) { + emptyResult := &v1.EgressQoS{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(egressqosesResource, c.ns, egressQoS), &v1.EgressQoS{}) + Invokes(testing.NewUpdateActionWithOptions(egressqosesResource, c.ns, egressQoS, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressQoS), err } // UpdateStatus was generated because the type contains a Status member. 
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeEgressQoSes) UpdateStatus(ctx context.Context, egressQoS *v1.EgressQoS, opts metav1.UpdateOptions) (*v1.EgressQoS, error) { +func (c *FakeEgressQoSes) UpdateStatus(ctx context.Context, egressQoS *v1.EgressQoS, opts metav1.UpdateOptions) (result *v1.EgressQoS, err error) { + emptyResult := &v1.EgressQoS{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(egressqosesResource, "status", c.ns, egressQoS), &v1.EgressQoS{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(egressqosesResource, "status", c.ns, egressQoS, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressQoS), err } @@ -125,7 +130,7 @@ func (c *FakeEgressQoSes) Delete(ctx context.Context, name string, opts metav1.D // DeleteCollection deletes a collection of objects. func (c *FakeEgressQoSes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(egressqosesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(egressqosesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.EgressQoSList{}) return err @@ -133,11 +138,12 @@ func (c *FakeEgressQoSes) DeleteCollection(ctx context.Context, opts metav1.Dele // Patch applies the patch and returns the patched egressQoS. func (c *FakeEgressQoSes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EgressQoS, err error) { + emptyResult := &v1.EgressQoS{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(egressqosesResource, c.ns, name, pt, data, subresources...), &v1.EgressQoS{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(egressqosesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressQoS), err } @@ -155,11 +161,12 @@ func (c *FakeEgressQoSes) Apply(ctx context.Context, egressQoS *egressqosv1.Egre if name == nil { return nil, fmt.Errorf("egressQoS.Name must be provided to Apply") } + emptyResult := &v1.EgressQoS{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(egressqosesResource, c.ns, *name, types.ApplyPatchType, data), &v1.EgressQoS{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(egressqosesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressQoS), err } @@ -178,11 +185,12 @@ func (c *FakeEgressQoSes) ApplyStatus(ctx context.Context, egressQoS *egressqosv if name == nil { return nil, fmt.Errorf("egressQoS.Name must be provided to Apply") } + emptyResult := &v1.EgressQoS{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(egressqosesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.EgressQoS{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(egressqosesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressQoS), err } diff --git a/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go b/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go index 632ee18d05..69c99ba01d 100644 --- a/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go +++ b/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go @@ -227,6 +227,7 @@ type SharedInformerFactory interface { // Start initializes all requested informers. They are handled in goroutines // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. Start(stopCh <-chan struct{}) // Shutdown marks a factory as shutting down. At that point no new diff --git a/go-controller/pkg/crd/egressqos/v1/apis/listers/egressqos/v1/egressqos.go b/go-controller/pkg/crd/egressqos/v1/apis/listers/egressqos/v1/egressqos.go index de3a6c90c2..c681f6e79f 100644 --- a/go-controller/pkg/crd/egressqos/v1/apis/listers/egressqos/v1/egressqos.go +++ b/go-controller/pkg/crd/egressqos/v1/apis/listers/egressqos/v1/egressqos.go @@ -19,8 +19,8 @@ package v1 import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -37,25 +37,17 @@ type EgressQoSLister interface { // egressQoSLister implements the EgressQoSLister interface. type egressQoSLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.EgressQoS] } // NewEgressQoSLister returns a new EgressQoSLister. func NewEgressQoSLister(indexer cache.Indexer) EgressQoSLister { - return &egressQoSLister{indexer: indexer} -} - -// List lists all EgressQoSes in the indexer. -func (s *egressQoSLister) List(selector labels.Selector) (ret []*v1.EgressQoS, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.EgressQoS)) - }) - return ret, err + return &egressQoSLister{listers.New[*v1.EgressQoS](indexer, v1.Resource("egressqos"))} } // EgressQoSes returns an object that can list and get EgressQoSes. func (s *egressQoSLister) EgressQoSes(namespace string) EgressQoSNamespaceLister { - return egressQoSNamespaceLister{indexer: s.indexer, namespace: namespace} + return egressQoSNamespaceLister{listers.NewNamespaced[*v1.EgressQoS](s.ResourceIndexer, namespace)} } // EgressQoSNamespaceLister helps list and get EgressQoSes. @@ -73,26 +65,5 @@ type EgressQoSNamespaceLister interface { // egressQoSNamespaceLister implements the EgressQoSNamespaceLister // interface. type egressQoSNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all EgressQoSes in the indexer for a given namespace. -func (s egressQoSNamespaceLister) List(selector labels.Selector) (ret []*v1.EgressQoS, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.EgressQoS)) - }) - return ret, err -} - -// Get retrieves the EgressQoS from the indexer for a given namespace and name. 
-func (s egressQoSNamespaceLister) Get(name string) (*v1.EgressQoS, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("egressqos"), name) - } - return obj.(*v1.EgressQoS), nil + listers.ResourceIndexer[*v1.EgressQoS] } diff --git a/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/egressservice/v1/egressservice.go b/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/egressservice/v1/egressservice.go index c1f234c7c5..000fe32774 100644 --- a/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/egressservice/v1/egressservice.go +++ b/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/egressservice/v1/egressservice.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EgressServiceApplyConfiguration represents an declarative configuration of the EgressService type for use +// EgressServiceApplyConfiguration represents a declarative configuration of the EgressService type for use // with apply. type EgressServiceApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -32,7 +32,7 @@ type EgressServiceApplyConfiguration struct { Status *EgressServiceStatusApplyConfiguration `json:"status,omitempty"` } -// EgressService constructs an declarative configuration of the EgressService type for use with +// EgressService constructs a declarative configuration of the EgressService type for use with // apply. func EgressService(name, namespace string) *EgressServiceApplyConfiguration { b := &EgressServiceApplyConfiguration{} @@ -216,3 +216,9 @@ func (b *EgressServiceApplyConfiguration) WithStatus(value *EgressServiceStatusA b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EgressServiceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/egressservice/v1/egressservicespec.go b/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/egressservice/v1/egressservicespec.go index 5b4f18a545..08b8f511ba 100644 --- a/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/egressservice/v1/egressservicespec.go +++ b/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/egressservice/v1/egressservicespec.go @@ -19,18 +19,18 @@ package v1 import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EgressServiceSpecApplyConfiguration represents an declarative configuration of the EgressServiceSpec type for use +// EgressServiceSpecApplyConfiguration represents a declarative configuration of the EgressServiceSpec type for use // with apply. 
type EgressServiceSpecApplyConfiguration struct { - SourceIPBy *v1.SourceIPMode `json:"sourceIPBy,omitempty"` - NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"` - Network *string `json:"network,omitempty"` + SourceIPBy *v1.SourceIPMode `json:"sourceIPBy,omitempty"` + NodeSelector *metav1.LabelSelectorApplyConfiguration `json:"nodeSelector,omitempty"` + Network *string `json:"network,omitempty"` } -// EgressServiceSpecApplyConfiguration constructs an declarative configuration of the EgressServiceSpec type for use with +// EgressServiceSpecApplyConfiguration constructs a declarative configuration of the EgressServiceSpec type for use with // apply. func EgressServiceSpec() *EgressServiceSpecApplyConfiguration { return &EgressServiceSpecApplyConfiguration{} @@ -47,8 +47,8 @@ func (b *EgressServiceSpecApplyConfiguration) WithSourceIPBy(value v1.SourceIPMo // WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NodeSelector field is set to the value of the last call. -func (b *EgressServiceSpecApplyConfiguration) WithNodeSelector(value metav1.LabelSelector) *EgressServiceSpecApplyConfiguration { - b.NodeSelector = &value +func (b *EgressServiceSpecApplyConfiguration) WithNodeSelector(value *metav1.LabelSelectorApplyConfiguration) *EgressServiceSpecApplyConfiguration { + b.NodeSelector = value return b } diff --git a/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/egressservice/v1/egressservicestatus.go b/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/egressservice/v1/egressservicestatus.go index 94d82a9ee7..40928d5f52 100644 --- a/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/egressservice/v1/egressservicestatus.go +++ b/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/egressservice/v1/egressservicestatus.go @@ -17,13 +17,13 @@ limitations under the License. package v1 -// EgressServiceStatusApplyConfiguration represents an declarative configuration of the EgressServiceStatus type for use +// EgressServiceStatusApplyConfiguration represents a declarative configuration of the EgressServiceStatus type for use // with apply. type EgressServiceStatusApplyConfiguration struct { Host *string `json:"host,omitempty"` } -// EgressServiceStatusApplyConfiguration constructs an declarative configuration of the EgressServiceStatus type for use with +// EgressServiceStatusApplyConfiguration constructs a declarative configuration of the EgressServiceStatus type for use with // apply. 
func EgressServiceStatus() *EgressServiceStatusApplyConfiguration { return &EgressServiceStatusApplyConfiguration{} diff --git a/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/utils.go b/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/utils.go index 3e72316a38..01cd6a9cda 100644 --- a/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/utils.go +++ b/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/utils.go @@ -20,7 +20,10 @@ package applyconfiguration import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1" egressservicev1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/egressservice/v1" + internal "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/internal" + runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" ) // ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no @@ -38,3 +41,7 @@ func ForKind(kind schema.GroupVersionKind) interface{} { } return nil } + +func NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter { + return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()} +} diff --git a/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/fake/clientset_generated.go b/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/fake/clientset_generated.go index fcc43cee87..817db2690c 100644 --- a/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/fake/clientset_generated.go +++ b/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/fake/clientset_generated.go @@ -18,6 +18,7 @@ limitations under the License. package fake import ( + applyconfiguration "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration" clientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned" k8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/typed/egressservice/v1" fakek8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/typed/egressservice/v1/fake" @@ -30,8 +31,12 @@ import ( // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { @@ -73,6 +78,38 @@ func (c *Clientset) Tracker() testing.ObjectTracker { return c.tracker } +// NewClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. 
It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewClientset(objects ...runtime.Object) *Clientset { + o := testing.NewFieldManagedObjectTracker( + scheme, + codecs.UniversalDecoder(), + applyconfiguration.NewTypeConverter(scheme), + ) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + var ( _ clientset.Interface = &Clientset{} _ testing.FakeClient = &Clientset{} diff --git a/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/typed/egressservice/v1/egressservice.go b/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/typed/egressservice/v1/egressservice.go index d410988c5d..b68ba274cf 100644 --- a/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/typed/egressservice/v1/egressservice.go +++ b/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/typed/egressservice/v1/egressservice.go @@ -19,9 +19,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1" egressservicev1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/applyconfiguration/egressservice/v1" @@ -29,7 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // EgressServicesGetter has a method to return a EgressServiceInterface. @@ -42,6 +39,7 @@ type EgressServicesGetter interface { type EgressServiceInterface interface { Create(ctx context.Context, egressService *v1.EgressService, opts metav1.CreateOptions) (*v1.EgressService, error) Update(ctx context.Context, egressService *v1.EgressService, opts metav1.UpdateOptions) (*v1.EgressService, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, egressService *v1.EgressService, opts metav1.UpdateOptions) (*v1.EgressService, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -50,206 +48,25 @@ type EgressServiceInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EgressService, err error) Apply(ctx context.Context, egressService *egressservicev1.EgressServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EgressService, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, egressService *egressservicev1.EgressServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EgressService, err error) EgressServiceExpansion } // egressServices implements EgressServiceInterface type egressServices struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.EgressService, *v1.EgressServiceList, *egressservicev1.EgressServiceApplyConfiguration] } // newEgressServices returns a EgressServices func newEgressServices(c *K8sV1Client, namespace string) *egressServices { return &egressServices{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.EgressService, *v1.EgressServiceList, *egressservicev1.EgressServiceApplyConfiguration]( + "egressservices", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.EgressService { return &v1.EgressService{} }, + func() *v1.EgressServiceList { return &v1.EgressServiceList{} }), } } - -// Get takes name of the egressService, and returns the corresponding egressService object, and an error if there is any. -func (c *egressServices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EgressService, err error) { - result = &v1.EgressService{} - err = c.client.Get(). - Namespace(c.ns). - Resource("egressservices"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of EgressServices that match those selectors. -func (c *egressServices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EgressServiceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EgressServiceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("egressservices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested egressServices. -func (c *egressServices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("egressservices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a egressService and creates it. Returns the server's representation of the egressService, and an error, if there is any. -func (c *egressServices) Create(ctx context.Context, egressService *v1.EgressService, opts metav1.CreateOptions) (result *v1.EgressService, err error) { - result = &v1.EgressService{} - err = c.client.Post(). - Namespace(c.ns). - Resource("egressservices"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(egressService). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a egressService and updates it. Returns the server's representation of the egressService, and an error, if there is any. -func (c *egressServices) Update(ctx context.Context, egressService *v1.EgressService, opts metav1.UpdateOptions) (result *v1.EgressService, err error) { - result = &v1.EgressService{} - err = c.client.Put(). - Namespace(c.ns). - Resource("egressservices"). - Name(egressService.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(egressService). - Do(ctx). 
- Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *egressServices) UpdateStatus(ctx context.Context, egressService *v1.EgressService, opts metav1.UpdateOptions) (result *v1.EgressService, err error) { - result = &v1.EgressService{} - err = c.client.Put(). - Namespace(c.ns). - Resource("egressservices"). - Name(egressService.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(egressService). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the egressService and deletes it. Returns an error if one occurs. -func (c *egressServices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("egressservices"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *egressServices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("egressservices"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched egressService. -func (c *egressServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EgressService, err error) { - result = &v1.EgressService{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("egressservices"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied egressService. -func (c *egressServices) Apply(ctx context.Context, egressService *egressservicev1.EgressServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EgressService, err error) { - if egressService == nil { - return nil, fmt.Errorf("egressService provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(egressService) - if err != nil { - return nil, err - } - name := egressService.Name - if name == nil { - return nil, fmt.Errorf("egressService.Name must be provided to Apply") - } - result = &v1.EgressService{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("egressservices"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *egressServices) ApplyStatus(ctx context.Context, egressService *egressservicev1.EgressServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EgressService, err error) { - if egressService == nil { - return nil, fmt.Errorf("egressService provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(egressService) - if err != nil { - return nil, err - } - - name := egressService.Name - if name == nil { - return nil, fmt.Errorf("egressService.Name must be provided to Apply") - } - - result = &v1.EgressService{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("egressservices"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/typed/egressservice/v1/fake/fake_egressservice.go b/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/typed/egressservice/v1/fake/fake_egressservice.go index 6269004281..e709546898 100644 --- a/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/typed/egressservice/v1/fake/fake_egressservice.go +++ b/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/typed/egressservice/v1/fake/fake_egressservice.go @@ -43,22 +43,24 @@ var egressservicesKind = v1.SchemeGroupVersion.WithKind("EgressService") // Get takes name of the egressService, and returns the corresponding egressService object, and an error if there is any. func (c *FakeEgressServices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EgressService, err error) { + emptyResult := &v1.EgressService{} obj, err := c.Fake. - Invokes(testing.NewGetAction(egressservicesResource, c.ns, name), &v1.EgressService{}) + Invokes(testing.NewGetActionWithOptions(egressservicesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressService), err } // List takes label and field selectors, and returns the list of EgressServices that match those selectors. func (c *FakeEgressServices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EgressServiceList, err error) { + emptyResult := &v1.EgressServiceList{} obj, err := c.Fake. - Invokes(testing.NewListAction(egressservicesResource, egressservicesKind, c.ns, opts), &v1.EgressServiceList{}) + Invokes(testing.NewListActionWithOptions(egressservicesResource, egressservicesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -77,40 +79,43 @@ func (c *FakeEgressServices) List(ctx context.Context, opts metav1.ListOptions) // Watch returns a watch.Interface that watches the requested egressServices. func (c *FakeEgressServices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(egressservicesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(egressservicesResource, c.ns, opts)) } // Create takes the representation of a egressService and creates it. Returns the server's representation of the egressService, and an error, if there is any. func (c *FakeEgressServices) Create(ctx context.Context, egressService *v1.EgressService, opts metav1.CreateOptions) (result *v1.EgressService, err error) { + emptyResult := &v1.EgressService{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(egressservicesResource, c.ns, egressService), &v1.EgressService{}) + Invokes(testing.NewCreateActionWithOptions(egressservicesResource, c.ns, egressService, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressService), err } // Update takes the representation of a egressService and updates it. Returns the server's representation of the egressService, and an error, if there is any. func (c *FakeEgressServices) Update(ctx context.Context, egressService *v1.EgressService, opts metav1.UpdateOptions) (result *v1.EgressService, err error) { + emptyResult := &v1.EgressService{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(egressservicesResource, c.ns, egressService), &v1.EgressService{}) + Invokes(testing.NewUpdateActionWithOptions(egressservicesResource, c.ns, egressService, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressService), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeEgressServices) UpdateStatus(ctx context.Context, egressService *v1.EgressService, opts metav1.UpdateOptions) (*v1.EgressService, error) { +func (c *FakeEgressServices) UpdateStatus(ctx context.Context, egressService *v1.EgressService, opts metav1.UpdateOptions) (result *v1.EgressService, err error) { + emptyResult := &v1.EgressService{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(egressservicesResource, "status", c.ns, egressService), &v1.EgressService{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(egressservicesResource, "status", c.ns, egressService, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressService), err } @@ -125,7 +130,7 @@ func (c *FakeEgressServices) Delete(ctx context.Context, name string, opts metav // DeleteCollection deletes a collection of objects. func (c *FakeEgressServices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(egressservicesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(egressservicesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.EgressServiceList{}) return err @@ -133,11 +138,12 @@ func (c *FakeEgressServices) DeleteCollection(ctx context.Context, opts metav1.D // Patch applies the patch and returns the patched egressService. func (c *FakeEgressServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EgressService, err error) { + emptyResult := &v1.EgressService{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(egressservicesResource, c.ns, name, pt, data, subresources...), &v1.EgressService{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(egressservicesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressService), err } @@ -155,11 +161,12 @@ func (c *FakeEgressServices) Apply(ctx context.Context, egressService *egressser if name == nil { return nil, fmt.Errorf("egressService.Name must be provided to Apply") } + emptyResult := &v1.EgressService{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(egressservicesResource, c.ns, *name, types.ApplyPatchType, data), &v1.EgressService{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(egressservicesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressService), err } @@ -178,11 +185,12 @@ func (c *FakeEgressServices) ApplyStatus(ctx context.Context, egressService *egr if name == nil { return nil, fmt.Errorf("egressService.Name must be provided to Apply") } + emptyResult := &v1.EgressService{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(egressservicesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.EgressService{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(egressservicesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EgressService), err } diff --git a/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go b/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go index 8d08e1cab3..f4d910561f 100644 --- a/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go +++ b/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go @@ -227,6 +227,7 @@ type SharedInformerFactory interface { // Start initializes all requested informers. They are handled in goroutines // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. Start(stopCh <-chan struct{}) // Shutdown marks a factory as shutting down. At that point no new diff --git a/go-controller/pkg/crd/egressservice/v1/apis/listers/egressservice/v1/egressservice.go b/go-controller/pkg/crd/egressservice/v1/apis/listers/egressservice/v1/egressservice.go index aba1b0c106..b78ef65070 100644 --- a/go-controller/pkg/crd/egressservice/v1/apis/listers/egressservice/v1/egressservice.go +++ b/go-controller/pkg/crd/egressservice/v1/apis/listers/egressservice/v1/egressservice.go @@ -19,8 +19,8 @@ package v1 import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -37,25 +37,17 @@ type EgressServiceLister interface { // egressServiceLister implements the EgressServiceLister interface. type egressServiceLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.EgressService] } // NewEgressServiceLister returns a new EgressServiceLister. func NewEgressServiceLister(indexer cache.Indexer) EgressServiceLister { - return &egressServiceLister{indexer: indexer} -} - -// List lists all EgressServices in the indexer. -func (s *egressServiceLister) List(selector labels.Selector) (ret []*v1.EgressService, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.EgressService)) - }) - return ret, err + return &egressServiceLister{listers.New[*v1.EgressService](indexer, v1.Resource("egressservice"))} } // EgressServices returns an object that can list and get EgressServices. 
func (s *egressServiceLister) EgressServices(namespace string) EgressServiceNamespaceLister { - return egressServiceNamespaceLister{indexer: s.indexer, namespace: namespace} + return egressServiceNamespaceLister{listers.NewNamespaced[*v1.EgressService](s.ResourceIndexer, namespace)} } // EgressServiceNamespaceLister helps list and get EgressServices. @@ -73,26 +65,5 @@ type EgressServiceNamespaceLister interface { // egressServiceNamespaceLister implements the EgressServiceNamespaceLister // interface. type egressServiceNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all EgressServices in the indexer for a given namespace. -func (s egressServiceNamespaceLister) List(selector labels.Selector) (ret []*v1.EgressService, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.EgressService)) - }) - return ret, err -} - -// Get retrieves the EgressService from the indexer for a given namespace and name. -func (s egressServiceNamespaceLister) Get(name string) (*v1.EgressService, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("egressservice"), name) - } - return obj.(*v1.EgressService), nil + listers.ResourceIndexer[*v1.EgressService] } diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/internal/internal.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/internal/internal.go new file mode 100644 index 0000000000..765bec46ab --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/internal/internal.go @@ -0,0 +1,61 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package internal + +import ( + "fmt" + "sync" + + typed "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +func Parser() *typed.Parser { + parserOnce.Do(func() { + var err error + parser, err = typed.NewParser(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *typed.Parser +var schemaYAML = typed.YAMLObject(`types: +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`) diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/routeadvertisements/v1/advertisements.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/routeadvertisements/v1/advertisements.go new file mode 100644 index 0000000000..49300610c3 --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/routeadvertisements/v1/advertisements.go @@ -0,0 +1,47 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AdvertisementsApplyConfiguration represents a declarative configuration of the Advertisements type for use +// with apply. +type AdvertisementsApplyConfiguration struct { + PodNetwork *bool `json:"podNetwork,omitempty"` + EgressIP *bool `json:"egressIP,omitempty"` +} + +// AdvertisementsApplyConfiguration constructs a declarative configuration of the Advertisements type for use with +// apply. +func Advertisements() *AdvertisementsApplyConfiguration { + return &AdvertisementsApplyConfiguration{} +} + +// WithPodNetwork sets the PodNetwork field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PodNetwork field is set to the value of the last call. +func (b *AdvertisementsApplyConfiguration) WithPodNetwork(value bool) *AdvertisementsApplyConfiguration { + b.PodNetwork = &value + return b +} + +// WithEgressIP sets the EgressIP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EgressIP field is set to the value of the last call. 
+func (b *AdvertisementsApplyConfiguration) WithEgressIP(value bool) *AdvertisementsApplyConfiguration { + b.EgressIP = &value + return b +} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/routeadvertisements/v1/routeadvertisements.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/routeadvertisements/v1/routeadvertisements.go new file mode 100644 index 0000000000..6daf8339c0 --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/routeadvertisements/v1/routeadvertisements.go @@ -0,0 +1,223 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// RouteAdvertisementsApplyConfiguration represents a declarative configuration of the RouteAdvertisements type for use +// with apply. +type RouteAdvertisementsApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *RouteAdvertisementsSpecApplyConfiguration `json:"spec,omitempty"` + Status *RouteAdvertisementsStatusApplyConfiguration `json:"status,omitempty"` +} + +// RouteAdvertisements constructs a declarative configuration of the RouteAdvertisements type for use with +// apply. +func RouteAdvertisements(name string) *RouteAdvertisementsApplyConfiguration { + b := &RouteAdvertisementsApplyConfiguration{} + b.WithName(name) + b.WithKind("RouteAdvertisements") + b.WithAPIVersion("k8s.ovn.org/v1") + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *RouteAdvertisementsApplyConfiguration) WithKind(value string) *RouteAdvertisementsApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *RouteAdvertisementsApplyConfiguration) WithAPIVersion(value string) *RouteAdvertisementsApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *RouteAdvertisementsApplyConfiguration) WithName(value string) *RouteAdvertisementsApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *RouteAdvertisementsApplyConfiguration) WithGenerateName(value string) *RouteAdvertisementsApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *RouteAdvertisementsApplyConfiguration) WithNamespace(value string) *RouteAdvertisementsApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *RouteAdvertisementsApplyConfiguration) WithUID(value types.UID) *RouteAdvertisementsApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *RouteAdvertisementsApplyConfiguration) WithResourceVersion(value string) *RouteAdvertisementsApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *RouteAdvertisementsApplyConfiguration) WithGeneration(value int64) *RouteAdvertisementsApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *RouteAdvertisementsApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RouteAdvertisementsApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *RouteAdvertisementsApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RouteAdvertisementsApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *RouteAdvertisementsApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RouteAdvertisementsApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field that have the same key.
+func (b *RouteAdvertisementsApplyConfiguration) WithLabels(entries map[string]string) *RouteAdvertisementsApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.Labels == nil && len(entries) > 0 {
+		b.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field that have the same key.
+func (b *RouteAdvertisementsApplyConfiguration) WithAnnotations(entries map[string]string) *RouteAdvertisementsApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.Annotations == nil && len(entries) > 0 {
+		b.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *RouteAdvertisementsApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *RouteAdvertisementsApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.OwnerReferences = append(b.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *RouteAdvertisementsApplyConfiguration) WithFinalizers(values ...string) *RouteAdvertisementsApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *RouteAdvertisementsApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *RouteAdvertisementsApplyConfiguration) WithSpec(value *RouteAdvertisementsSpecApplyConfiguration) *RouteAdvertisementsApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *RouteAdvertisementsApplyConfiguration) WithStatus(value *RouteAdvertisementsStatusApplyConfiguration) *RouteAdvertisementsApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RouteAdvertisementsApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/routeadvertisements/v1/routeadvertisementsspec.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/routeadvertisements/v1/routeadvertisementsspec.go new file mode 100644 index 0000000000..4c9f963870 --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/routeadvertisements/v1/routeadvertisementsspec.go @@ -0,0 +1,81 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + routeadvertisementsv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// RouteAdvertisementsSpecApplyConfiguration represents a declarative configuration of the RouteAdvertisementsSpec type for use +// with apply. 
+type RouteAdvertisementsSpecApplyConfiguration struct {
+	TargetVRF                *string                                    `json:"targetVRF,omitempty"`
+	NetworkSelector          *v1.LabelSelectorApplyConfiguration        `json:"networkSelector,omitempty"`
+	NodeSelector             *v1.LabelSelectorApplyConfiguration        `json:"nodeSelector,omitempty"`
+	FRRConfigurationSelector *v1.LabelSelectorApplyConfiguration        `json:"frrConfigurationSelector,omitempty"`
+	Advertisements           []routeadvertisementsv1.AdvertisementType `json:"advertisements,omitempty"`
+}
+
+// RouteAdvertisementsSpecApplyConfiguration constructs a declarative configuration of the RouteAdvertisementsSpec type for use with
+// apply.
+func RouteAdvertisementsSpec() *RouteAdvertisementsSpecApplyConfiguration {
+	return &RouteAdvertisementsSpecApplyConfiguration{}
+}
+
+// WithTargetVRF sets the TargetVRF field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TargetVRF field is set to the value of the last call.
+func (b *RouteAdvertisementsSpecApplyConfiguration) WithTargetVRF(value string) *RouteAdvertisementsSpecApplyConfiguration {
+	b.TargetVRF = &value
+	return b
+}
+
+// WithNetworkSelector sets the NetworkSelector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NetworkSelector field is set to the value of the last call.
+func (b *RouteAdvertisementsSpecApplyConfiguration) WithNetworkSelector(value *v1.LabelSelectorApplyConfiguration) *RouteAdvertisementsSpecApplyConfiguration {
+	b.NetworkSelector = value
+	return b
+}
+
+// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeSelector field is set to the value of the last call.
+func (b *RouteAdvertisementsSpecApplyConfiguration) WithNodeSelector(value *v1.LabelSelectorApplyConfiguration) *RouteAdvertisementsSpecApplyConfiguration {
+	b.NodeSelector = value
+	return b
+}
+
+// WithFRRConfigurationSelector sets the FRRConfigurationSelector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the FRRConfigurationSelector field is set to the value of the last call.
+func (b *RouteAdvertisementsSpecApplyConfiguration) WithFRRConfigurationSelector(value *v1.LabelSelectorApplyConfiguration) *RouteAdvertisementsSpecApplyConfiguration {
+	b.FRRConfigurationSelector = value
+	return b
+}
+
+// WithAdvertisements adds the given value to the Advertisements field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Advertisements field.
+func (b *RouteAdvertisementsSpecApplyConfiguration) WithAdvertisements(values ...routeadvertisementsv1.AdvertisementType) *RouteAdvertisementsSpecApplyConfiguration {
+	for i := range values {
+		b.Advertisements = append(b.Advertisements, values[i])
+	}
+	return b
+}
diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/routeadvertisements/v1/routeadvertisementsstatus.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/routeadvertisements/v1/routeadvertisementsstatus.go
new file mode 100644
index 0000000000..c1cdf054c1
--- /dev/null
+++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/routeadvertisements/v1/routeadvertisementsstatus.go
@@ -0,0 +1,56 @@
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// RouteAdvertisementsStatusApplyConfiguration represents a declarative configuration of the RouteAdvertisementsStatus type for use
+// with apply.
+type RouteAdvertisementsStatusApplyConfiguration struct {
+	Status     *string                          `json:"status,omitempty"`
+	Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+}
+
+// RouteAdvertisementsStatusApplyConfiguration constructs a declarative configuration of the RouteAdvertisementsStatus type for use with
+// apply.
+func RouteAdvertisementsStatus() *RouteAdvertisementsStatusApplyConfiguration {
+	return &RouteAdvertisementsStatusApplyConfiguration{}
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *RouteAdvertisementsStatusApplyConfiguration) WithStatus(value string) *RouteAdvertisementsStatusApplyConfiguration {
+	b.Status = &value
+	return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
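Taken together, the builders generated above support server-side apply from Go code. The sketch below shows how a caller might declare and apply a RouteAdvertisements object; it is illustrative only: the clientset `cs`, the field-manager name, the selector label, and the `"PodNetwork"` advertisement value are assumptions (the concrete `AdvertisementType` constants live in the CRD types package, which is not part of this hunk).

```go
package main

import (
	"context"

	rav1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1"
	ravapply "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/routeadvertisements/v1"
	versioned "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
)

// applyDefaultRA declares the desired state of a cluster-scoped
// RouteAdvertisements object and applies it, taking ownership of the listed
// fields under the "example-manager" field manager.
func applyDefaultRA(ctx context.Context, cs versioned.Interface) (*rav1.RouteAdvertisements, error) {
	ra := ravapply.RouteAdvertisements("default").
		WithLabels(map[string]string{"app": "example"}). // illustrative label
		WithSpec(ravapply.RouteAdvertisementsSpec().
			WithTargetVRF("default").
			// Illustrative selector; substitute the labels your networks carry.
			WithNetworkSelector(metav1ac.LabelSelector().
				WithMatchLabels(map[string]string{"example.com/network": "default"})).
			// "PodNetwork" is a placeholder; use the real AdvertisementType constants.
			WithAdvertisements(rav1.AdvertisementType("PodNetwork")))

	return cs.K8sV1().RouteAdvertisements().
		Apply(ctx, ra, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
}
```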
+func (b *RouteAdvertisementsStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *RouteAdvertisementsStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/utils.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/utils.go new file mode 100644 index 0000000000..8adccb48db --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/utils.go @@ -0,0 +1,47 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package applyconfiguration + +import ( + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1" + internal "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/internal" + routeadvertisementsv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/routeadvertisements/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no +// apply configuration type exists for the given GroupVersionKind. +func ForKind(kind schema.GroupVersionKind) interface{} { + switch kind { + // Group=k8s.ovn.org, Version=v1 + case v1.SchemeGroupVersion.WithKind("RouteAdvertisements"): + return &routeadvertisementsv1.RouteAdvertisementsApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RouteAdvertisementsSpec"): + return &routeadvertisementsv1.RouteAdvertisementsSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RouteAdvertisementsStatus"): + return &routeadvertisementsv1.RouteAdvertisementsStatusApplyConfiguration{} + + } + return nil +} + +func NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter { + return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()} +} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/clientset.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/clientset.go new file mode 100644 index 0000000000..c7df1c566a --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/clientset.go @@ -0,0 +1,119 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + "net/http" + + k8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + K8sV1() k8sv1.K8sV1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + k8sV1 *k8sv1.K8sV1Client +} + +// K8sV1 retrieves the K8sV1Client +func (c *Clientset) K8sV1() k8sv1.K8sV1Interface { + return c.k8sV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.k8sV1, err = k8sv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. 
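The `NewForConfig` constructor above is the usual entry point for this clientset. A minimal usage sketch follows, assuming an out-of-cluster caller with a kubeconfig file (in-cluster code would build the `rest.Config` via `rest.InClusterConfig()` instead); the function name and flow are illustrative, not part of this patch.

```go
package main

import (
	"context"
	"fmt"

	versioned "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

// listRouteAdvertisements builds the generated clientset from a kubeconfig
// and prints the name of every RouteAdvertisements object in the cluster.
func listRouteAdvertisements(ctx context.Context, kubeconfig string) error {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return err
	}
	cs, err := versioned.NewForConfig(cfg)
	if err != nil {
		return err
	}
	ras, err := cs.K8sV1().RouteAdvertisements().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, ra := range ras.Items {
		fmt.Println(ra.Name)
	}
	return nil
}
```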
+func New(c rest.Interface) *Clientset { + var cs Clientset + cs.k8sV1 = k8sv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/fake/clientset_generated.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000..2dc1da7905 --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,121 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + applyconfiguration "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration" + clientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned" + k8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1" + fakek8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
+type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +// NewClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewClientset(objects ...runtime.Object) *Clientset { + o := testing.NewFieldManagedObjectTracker( + scheme, + codecs.UniversalDecoder(), + applyconfiguration.NewTypeConverter(scheme), + ) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// K8sV1 retrieves the K8sV1Client +func (c *Clientset) K8sV1() k8sv1.K8sV1Interface { + return &fakek8sv1.FakeK8sV1{Fake: &c.Fake} +} diff --git a/go-controller/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/doc.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/fake/doc.go similarity index 66% rename from go-controller/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/doc.go rename to go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/fake/doc.go index a01d603fe5..19e0028ffb 100644 --- a/go-controller/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/doc.go +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,10 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. -/* -Package ratelimiter defines rate limiters used by Controllers to limit how frequently requests may be queued. - -Typical rate limiters that can be used are implemented in client-go's workqueue package. -*/ -package ratelimiter +// This package has the automatically generated fake clientset. +package fake diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/fake/register.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/fake/register.go new file mode 100644 index 0000000000..2d9338857a --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/fake/register.go @@ -0,0 +1,55 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + k8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + k8sv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/scheme/doc.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000..1aec4021fc --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/scheme/doc.go @@ -0,0 +1,19 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/scheme/register.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000..56e19ffc5e --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/scheme/register.go @@ -0,0 +1,55 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + k8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + k8sv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/doc.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/doc.go new file mode 100644 index 0000000000..b22b05acdb --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/doc.go @@ -0,0 +1,19 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/fake/doc.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/fake/doc.go new file mode 100644 index 0000000000..422564f2d5 --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/fake/doc.go @@ -0,0 +1,19 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/fake/fake_routeadvertisements.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/fake/fake_routeadvertisements.go new file mode 100644 index 0000000000..a4035f6b1c --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/fake/fake_routeadvertisements.go @@ -0,0 +1,185 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1" + routeadvertisementsv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/routeadvertisements/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeRouteAdvertisements implements RouteAdvertisementsInterface +type FakeRouteAdvertisements struct { + Fake *FakeK8sV1 +} + +var routeadvertisementsResource = v1.SchemeGroupVersion.WithResource("routeadvertisements") + +var routeadvertisementsKind = v1.SchemeGroupVersion.WithKind("RouteAdvertisements") + +// Get takes name of the routeAdvertisements, and returns the corresponding routeAdvertisements object, and an error if there is any. +func (c *FakeRouteAdvertisements) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RouteAdvertisements, err error) { + emptyResult := &v1.RouteAdvertisements{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(routeadvertisementsResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.RouteAdvertisements), err +} + +// List takes label and field selectors, and returns the list of RouteAdvertisements that match those selectors. +func (c *FakeRouteAdvertisements) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RouteAdvertisementsList, err error) { + emptyResult := &v1.RouteAdvertisementsList{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootListActionWithOptions(routeadvertisementsResource, routeadvertisementsKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.RouteAdvertisementsList{ListMeta: obj.(*v1.RouteAdvertisementsList).ListMeta} + for _, item := range obj.(*v1.RouteAdvertisementsList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested routeAdvertisements. +func (c *FakeRouteAdvertisements) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(routeadvertisementsResource, opts)) +} + +// Create takes the representation of a routeAdvertisements and creates it. Returns the server's representation of the routeAdvertisements, and an error, if there is any. +func (c *FakeRouteAdvertisements) Create(ctx context.Context, routeAdvertisements *v1.RouteAdvertisements, opts metav1.CreateOptions) (result *v1.RouteAdvertisements, err error) { + emptyResult := &v1.RouteAdvertisements{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(routeadvertisementsResource, routeAdvertisements, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.RouteAdvertisements), err +} + +// Update takes the representation of a routeAdvertisements and updates it. Returns the server's representation of the routeAdvertisements, and an error, if there is any. +func (c *FakeRouteAdvertisements) Update(ctx context.Context, routeAdvertisements *v1.RouteAdvertisements, opts metav1.UpdateOptions) (result *v1.RouteAdvertisements, err error) { + emptyResult := &v1.RouteAdvertisements{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(routeadvertisementsResource, routeAdvertisements, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.RouteAdvertisements), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeRouteAdvertisements) UpdateStatus(ctx context.Context, routeAdvertisements *v1.RouteAdvertisements, opts metav1.UpdateOptions) (result *v1.RouteAdvertisements, err error) { + emptyResult := &v1.RouteAdvertisements{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(routeadvertisementsResource, "status", routeAdvertisements, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.RouteAdvertisements), err +} + +// Delete takes name of the routeAdvertisements and deletes it. Returns an error if one occurs. +func (c *FakeRouteAdvertisements) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(routeadvertisementsResource, name, opts), &v1.RouteAdvertisements{}) + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeRouteAdvertisements) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(routeadvertisementsResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1.RouteAdvertisementsList{}) + return err +} + +// Patch applies the patch and returns the patched routeAdvertisements. +func (c *FakeRouteAdvertisements) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RouteAdvertisements, err error) { + emptyResult := &v1.RouteAdvertisements{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(routeadvertisementsResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.RouteAdvertisements), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied routeAdvertisements. +func (c *FakeRouteAdvertisements) Apply(ctx context.Context, routeAdvertisements *routeadvertisementsv1.RouteAdvertisementsApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RouteAdvertisements, err error) { + if routeAdvertisements == nil { + return nil, fmt.Errorf("routeAdvertisements provided to Apply must not be nil") + } + data, err := json.Marshal(routeAdvertisements) + if err != nil { + return nil, err + } + name := routeAdvertisements.Name + if name == nil { + return nil, fmt.Errorf("routeAdvertisements.Name must be provided to Apply") + } + emptyResult := &v1.RouteAdvertisements{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(routeadvertisementsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.RouteAdvertisements), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeRouteAdvertisements) ApplyStatus(ctx context.Context, routeAdvertisements *routeadvertisementsv1.RouteAdvertisementsApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RouteAdvertisements, err error) { + if routeAdvertisements == nil { + return nil, fmt.Errorf("routeAdvertisements provided to Apply must not be nil") + } + data, err := json.Marshal(routeAdvertisements) + if err != nil { + return nil, err + } + name := routeAdvertisements.Name + if name == nil { + return nil, fmt.Errorf("routeAdvertisements.Name must be provided to Apply") + } + emptyResult := &v1.RouteAdvertisements{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceActionWithOptions(routeadvertisementsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.RouteAdvertisements), err +} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/fake/fake_routeadvertisements_client.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/fake/fake_routeadvertisements_client.go new file mode 100644 index 0000000000..b206303e52 --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/fake/fake_routeadvertisements_client.go @@ -0,0 +1,39 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeK8sV1 struct { + *testing.Fake +} + +func (c *FakeK8sV1) RouteAdvertisements() v1.RouteAdvertisementsInterface { + return &FakeRouteAdvertisements{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeK8sV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/generated_expansion.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/generated_expansion.go new file mode 100644 index 0000000000..ee5bb02e97 --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/generated_expansion.go @@ -0,0 +1,20 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. 
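The fake clientset and typed fake client above are meant for unit tests. A minimal test sketch under stated assumptions (the CRD type embeds a standard `metav1.ObjectMeta`, and RouteAdvertisements is cluster-scoped, matching the root actions used by the fake):

```go
package fake_test

import (
	"context"
	"testing"

	rav1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1"
	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestGetRouteAdvertisements(t *testing.T) {
	// Seed the object tracker with one pre-existing cluster-scoped object.
	cs := fake.NewSimpleClientset(&rav1.RouteAdvertisements{
		ObjectMeta: metav1.ObjectMeta{Name: "default"},
	})

	got, err := cs.K8sV1().RouteAdvertisements().
		Get(context.Background(), "default", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != "default" {
		t.Fatalf("got %q, want %q", got.Name, "default")
	}
}
```

Where field-management semantics matter (for example when exercising `Apply`), `NewClientset` is the better choice, as its own doc comment notes.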
+ +package v1 + +type RouteAdvertisementsExpansion interface{} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/routeadvertisements.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/routeadvertisements.go new file mode 100644 index 0000000000..deb25409ae --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/routeadvertisements.go @@ -0,0 +1,72 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1" + routeadvertisementsv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/applyconfiguration/routeadvertisements/v1" + scheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// RouteAdvertisementsGetter has a method to return a RouteAdvertisementsInterface. +// A group's client should implement this interface. +type RouteAdvertisementsGetter interface { + RouteAdvertisements() RouteAdvertisementsInterface +} + +// RouteAdvertisementsInterface has methods to work with RouteAdvertisements resources. +type RouteAdvertisementsInterface interface { + Create(ctx context.Context, routeAdvertisements *v1.RouteAdvertisements, opts metav1.CreateOptions) (*v1.RouteAdvertisements, error) + Update(ctx context.Context, routeAdvertisements *v1.RouteAdvertisements, opts metav1.UpdateOptions) (*v1.RouteAdvertisements, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, routeAdvertisements *v1.RouteAdvertisements, opts metav1.UpdateOptions) (*v1.RouteAdvertisements, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.RouteAdvertisements, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.RouteAdvertisementsList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RouteAdvertisements, err error) + Apply(ctx context.Context, routeAdvertisements *routeadvertisementsv1.RouteAdvertisementsApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RouteAdvertisements, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, routeAdvertisements *routeadvertisementsv1.RouteAdvertisementsApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RouteAdvertisements, err error) + RouteAdvertisementsExpansion +} + +// routeAdvertisements implements RouteAdvertisementsInterface +type routeAdvertisements struct { + *gentype.ClientWithListAndApply[*v1.RouteAdvertisements, *v1.RouteAdvertisementsList, *routeadvertisementsv1.RouteAdvertisementsApplyConfiguration] +} + +// newRouteAdvertisements returns a RouteAdvertisements +func newRouteAdvertisements(c *K8sV1Client) *routeAdvertisements { + return &routeAdvertisements{ + gentype.NewClientWithListAndApply[*v1.RouteAdvertisements, *v1.RouteAdvertisementsList, *routeadvertisementsv1.RouteAdvertisementsApplyConfiguration]( + "routeadvertisements", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.RouteAdvertisements { return &v1.RouteAdvertisements{} }, + func() *v1.RouteAdvertisementsList { return &v1.RouteAdvertisementsList{} }), + } +} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/routeadvertisements_client.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/routeadvertisements_client.go new file mode 100644 index 0000000000..2deba95d5a --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/typed/routeadvertisements/v1/routeadvertisements_client.go @@ -0,0 +1,106 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "net/http" + + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type K8sV1Interface interface { + RESTClient() rest.Interface + RouteAdvertisementsGetter +} + +// K8sV1Client is used to interact with features provided by the k8s.ovn.org group. +type K8sV1Client struct { + restClient rest.Interface +} + +func (c *K8sV1Client) RouteAdvertisements() RouteAdvertisementsInterface { + return newRouteAdvertisements(c) +} + +// NewForConfig creates a new K8sV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*K8sV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new K8sV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*K8sV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &K8sV1Client{client}, nil +} + +// NewForConfigOrDie creates a new K8sV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *K8sV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new K8sV1Client for the given RESTClient. +func New(c rest.Interface) *K8sV1Client { + return &K8sV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *K8sV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/factory.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/factory.go new file mode 100644 index 0000000000..d672b40db6 --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/factory.go @@ -0,0 +1,261 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned" + internalinterfaces "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/internalinterfaces" + routeadvertisements "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/routeadvertisements" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + transform cache.TransformFunc + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. 
+ startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.shuttingDown { + return + } + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. 
+ informer := informer
+ go func() {
+ defer f.wg.Done()
+ informer.Run(stopCh)
+ }()
+ f.startedInformers[informerType] = true
+ }
+ }
+}
+
+func (f *sharedInformerFactory) Shutdown() {
+ f.lock.Lock()
+ f.shuttingDown = true
+ f.lock.Unlock()
+
+ // Will return immediately if there is nothing to wait for.
+ f.wg.Wait()
+}
+
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
+ informers := func() map[reflect.Type]cache.SharedIndexInformer {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ informers := map[reflect.Type]cache.SharedIndexInformer{}
+ for informerType, informer := range f.informers {
+ if f.startedInformers[informerType] {
+ informers[informerType] = informer
+ }
+ }
+ return informers
+ }()
+
+ res := map[reflect.Type]bool{}
+ for informType, informer := range informers {
+ res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
+ }
+ return res
+}
+
+// InformerFor returns the SharedIndexInformer for obj using an internal
+// client.
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ informerType := reflect.TypeOf(obj)
+ informer, exists := f.informers[informerType]
+ if exists {
+ return informer
+ }
+
+ resyncPeriod, exists := f.customResync[informerType]
+ if !exists {
+ resyncPeriod = f.defaultResync
+ }
+
+ informer = newFunc(f.client, resyncPeriod)
+ informer.SetTransform(f.transform)
+ f.informers[informerType] = informer
+
+ return informer
+}
+
+// SharedInformerFactory provides shared informers for resources in all known
+// API group versions.
+//
+// It is typically used like this:
+//
+// ctx, cancel := context.WithCancel(context.Background())
+// defer cancel()
+// factory := NewSharedInformerFactory(client, resyncPeriod)
+// defer factory.Shutdown() // Returns immediately if nothing was started.
+// genericInformer := factory.ForResource(resource)
+// typedInformer := factory.SomeAPIGroup().V1().SomeType()
+// factory.Start(ctx.Done()) // Start processing these informers.
+// synced := factory.WaitForCacheSync(ctx.Done())
+// for v, ok := range synced {
+// if !ok {
+// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+// return
+// }
+// }
+//
+// // Informers can also be created after Start, but then
+// // Start must be called again:
+// anotherGenericInformer := factory.ForResource(resource)
+// factory.Start(ctx.Done())
+type SharedInformerFactory interface {
+ internalinterfaces.SharedInformerFactory
+
+ // Start initializes all requested informers. They are handled in goroutines
+ // which run until the stop channel gets closed.
+ // Warning: Start does not block. When run in a goroutine, it will race with a later WaitForCacheSync.
+ Start(stopCh <-chan struct{})
+
+ // Shutdown marks a factory as shutting down. At that point no new
+ // informers can be started anymore and Start will return without
+ // doing anything.
+ //
+ // In addition, Shutdown blocks until all goroutines have terminated. For that
+ // to happen, the close channel(s) that they were started with must be closed,
+ // either before Shutdown gets called or while it is waiting.
+ //
+ // Shutdown may be called multiple times, even concurrently. All such calls will
+ // block until all goroutines have terminated.
+ Shutdown()
+
+ // WaitForCacheSync blocks until all started informers' caches were synced
+ // or the stop channel gets closed.
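+ // A false entry in the returned map means that informer's cache had
+ // not synced by the time the stop channel was closed.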
+ WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + // ForResource gives generic access to a shared informer of the matching type. + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // InformerFor returns the SharedIndexInformer for obj using an internal + // client. + InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer + + K8s() routeadvertisements.Interface +} + +func (f *sharedInformerFactory) K8s() routeadvertisements.Interface { + return routeadvertisements.New(f, f.namespace, f.tweakListOptions) +} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/generic.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/generic.go new file mode 100644 index 0000000000..53c719b0c1 --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/generic.go @@ -0,0 +1,61 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. 
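+// The lister serves reads from the informer's local cache rather than
+// querying the API server directly.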
+func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=k8s.ovn.org, Version=v1 + case v1.SchemeGroupVersion.WithResource("routeadvertisements"): + return &genericInformer{resource: resource.GroupResource(), informer: f.K8s().V1().RouteAdvertisements().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000..456872214c --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,39 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/routeadvertisements/interface.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/routeadvertisements/interface.go new file mode 100644 index 0000000000..83be9609eb --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/routeadvertisements/interface.go @@ -0,0 +1,45 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package routeadvertisements + +import ( + internalinterfaces "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/internalinterfaces" + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/routeadvertisements/v1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/routeadvertisements/v1/interface.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/routeadvertisements/v1/interface.go new file mode 100644 index 0000000000..5158d79310 --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/routeadvertisements/v1/interface.go @@ -0,0 +1,44 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // RouteAdvertisements returns a RouteAdvertisementsInformer. + RouteAdvertisements() RouteAdvertisementsInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// RouteAdvertisements returns a RouteAdvertisementsInformer. 
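+// A typical access path goes through a shared factory (sketch, assuming a
+// factory built with NewSharedInformerFactory):
+//
+// ra := factory.K8s().V1().RouteAdvertisements()
+// informer, lister := ra.Informer(), ra.Lister()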
+func (v *version) RouteAdvertisements() RouteAdvertisementsInformer { + return &routeAdvertisementsInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/routeadvertisements/v1/routeadvertisements.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/routeadvertisements/v1/routeadvertisements.go new file mode 100644 index 0000000000..273b67a8e9 --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/routeadvertisements/v1/routeadvertisements.go @@ -0,0 +1,88 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + routeadvertisementsv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1" + versioned "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned" + internalinterfaces "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/internalinterfaces" + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/listers/routeadvertisements/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// RouteAdvertisementsInformer provides access to a shared informer and lister for +// RouteAdvertisements. +type RouteAdvertisementsInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.RouteAdvertisementsLister +} + +type routeAdvertisementsInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewRouteAdvertisementsInformer constructs a new informer for RouteAdvertisements type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRouteAdvertisementsInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRouteAdvertisementsInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredRouteAdvertisementsInformer constructs a new informer for RouteAdvertisements type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
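+// When non-nil, tweakListOptions is applied to the ListOptions of both the
+// List and Watch calls that back the informer.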
+func NewFilteredRouteAdvertisementsInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.K8sV1().RouteAdvertisements().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.K8sV1().RouteAdvertisements().Watch(context.TODO(), options) + }, + }, + &routeadvertisementsv1.RouteAdvertisements{}, + resyncPeriod, + indexers, + ) +} + +func (f *routeAdvertisementsInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRouteAdvertisementsInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *routeAdvertisementsInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&routeadvertisementsv1.RouteAdvertisements{}, f.defaultInformer) +} + +func (f *routeAdvertisementsInformer) Lister() v1.RouteAdvertisementsLister { + return v1.NewRouteAdvertisementsLister(f.Informer().GetIndexer()) +} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/listers/routeadvertisements/v1/expansion_generated.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/listers/routeadvertisements/v1/expansion_generated.go new file mode 100644 index 0000000000..2d2b9b7add --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/listers/routeadvertisements/v1/expansion_generated.go @@ -0,0 +1,22 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// RouteAdvertisementsListerExpansion allows custom methods to be added to +// RouteAdvertisementsLister. +type RouteAdvertisementsListerExpansion interface{} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/apis/listers/routeadvertisements/v1/routeadvertisements.go b/go-controller/pkg/crd/routeadvertisements/v1/apis/listers/routeadvertisements/v1/routeadvertisements.go new file mode 100644 index 0000000000..e250e910ee --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/apis/listers/routeadvertisements/v1/routeadvertisements.go @@ -0,0 +1,47 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// RouteAdvertisementsLister helps list RouteAdvertisements. +// All objects returned here must be treated as read-only. +type RouteAdvertisementsLister interface { + // List lists all RouteAdvertisements in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.RouteAdvertisements, err error) + // Get retrieves the RouteAdvertisements from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.RouteAdvertisements, error) + RouteAdvertisementsListerExpansion +} + +// routeAdvertisementsLister implements the RouteAdvertisementsLister interface. +type routeAdvertisementsLister struct { + listers.ResourceIndexer[*v1.RouteAdvertisements] +} + +// NewRouteAdvertisementsLister returns a new RouteAdvertisementsLister. +func NewRouteAdvertisementsLister(indexer cache.Indexer) RouteAdvertisementsLister { + return &routeAdvertisementsLister{listers.New[*v1.RouteAdvertisements](indexer, v1.Resource("routeadvertisements"))} +} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/doc.go b/go-controller/pkg/crd/routeadvertisements/v1/doc.go new file mode 100644 index 0000000000..e7024fd4f8 --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/doc.go @@ -0,0 +1,5 @@ +// Package v1 contains API Schema definitions for the RouteAdvertisements v1 API +// group +// +k8s:deepcopy-gen=package +// +groupName=k8s.ovn.org +package v1 diff --git a/go-controller/pkg/crd/routeadvertisements/v1/register.go b/go-controller/pkg/crd/routeadvertisements/v1/register.go new file mode 100644 index 0000000000..c2e4822462 --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/register.go @@ -0,0 +1,34 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "k8s.ovn.org" + SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. 
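+// Consumers typically wire this in via AddToScheme (sketch, assuming a
+// runtime.Scheme named scheme and the apimachinery utilruntime helper):
+//
+// utilruntime.Must(AddToScheme(scheme))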
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &RouteAdvertisements{},
+ &RouteAdvertisementsList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/go-controller/pkg/crd/routeadvertisements/v1/types.go b/go-controller/pkg/crd/routeadvertisements/v1/types.go
new file mode 100644
index 0000000000..70c545bb25
--- /dev/null
+++ b/go-controller/pkg/crd/routeadvertisements/v1/types.go
@@ -0,0 +1,90 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:openapi-gen=true
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:path=routeadvertisements,scope=Cluster,shortName=ra,singular=routeadvertisements
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=".status.status"
+// RouteAdvertisements is the Schema for the routeadvertisements API
+type RouteAdvertisements struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec RouteAdvertisementsSpec `json:"spec,omitempty"`
+ Status RouteAdvertisementsStatus `json:"status,omitempty"`
+}
+
+// RouteAdvertisementsSpec defines the desired state of RouteAdvertisements
+// +kubebuilder:validation:XValidation:rule="!has(self.nodeSelector) || !('PodNetwork' in self.advertisements)",message="If 'PodNetwork' is selected for advertisement, a 'nodeSelector' can't be specified as it needs to be advertised on all nodes"
+type RouteAdvertisementsSpec struct {
+ // targetVRF determines which VRF the routes should be advertised in.
+ // +kubebuilder:validation:Optional
+ TargetVRF string `json:"targetVRF,omitempty"`
+
+ // networkSelector determines which network routes should be advertised. To
+ // select the default network, match on label 'k8s.ovn.org/default-network'.
+ NetworkSelector metav1.LabelSelector `json:"networkSelector,omitempty"`
+
+ // nodeSelector limits the advertisements to selected nodes.
+ // When omitted, all nodes are selected.
+ NodeSelector metav1.LabelSelector `json:"nodeSelector,omitempty"`
+
+ // frrConfigurationSelector determines which FRRConfigurations the
+ // OVN-Kubernetes driven FRRConfigurations will be based on.
+ // When omitted, all FRRConfigurations will be considered.
+ FRRConfigurationSelector metav1.LabelSelector `json:"frrConfigurationSelector,omitempty"`
+
+ // advertisements determines what is advertised.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinItems=1
+ // +kubebuilder:validation:MaxItems=2
+ // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))"
+ Advertisements []AdvertisementType `json:"advertisements,omitempty"`
+}
+
+// AdvertisementType determines the type of advertisement.
+type AdvertisementType string
+
+const (
+ // PodNetwork determines that the pod network is advertised.
+ PodNetwork AdvertisementType = "PodNetwork"
+
+ // EgressIP determines that egress IPs are being advertised.
+ EgressIP AdvertisementType = "EgressIP"
+)
+
+// RouteAdvertisementsStatus defines the observed state of RouteAdvertisements.
+// It should always be reconstructable from the state of the cluster and/or
+// outside world.
+type RouteAdvertisementsStatus struct {
+ // status is a concise indication of whether the RouteAdvertisements
+ // resource is applied with success.
+ // +kubebuilder:validation:Optional + Status string `json:"status,omitempty"` + + // conditions is an array of condition objects indicating details about + // status of RouteAdvertisements object. + // +kubebuilder:validation:Optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +// RouteAdvertisementsList contains a list of RouteAdvertisements +// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type RouteAdvertisementsList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RouteAdvertisements `json:"items"` +} diff --git a/go-controller/pkg/crd/routeadvertisements/v1/zz_generated.deepcopy.go b/go-controller/pkg/crd/routeadvertisements/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..484890a7cf --- /dev/null +++ b/go-controller/pkg/crd/routeadvertisements/v1/zz_generated.deepcopy.go @@ -0,0 +1,134 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteAdvertisements) DeepCopyInto(out *RouteAdvertisements) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteAdvertisements. +func (in *RouteAdvertisements) DeepCopy() *RouteAdvertisements { + if in == nil { + return nil + } + out := new(RouteAdvertisements) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RouteAdvertisements) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteAdvertisementsList) DeepCopyInto(out *RouteAdvertisementsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RouteAdvertisements, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteAdvertisementsList. 
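+// Objects obtained from informer caches or listers are shared; callers must
+// DeepCopy them before mutating.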
+func (in *RouteAdvertisementsList) DeepCopy() *RouteAdvertisementsList { + if in == nil { + return nil + } + out := new(RouteAdvertisementsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RouteAdvertisementsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteAdvertisementsSpec) DeepCopyInto(out *RouteAdvertisementsSpec) { + *out = *in + in.NetworkSelector.DeepCopyInto(&out.NetworkSelector) + in.NodeSelector.DeepCopyInto(&out.NodeSelector) + in.FRRConfigurationSelector.DeepCopyInto(&out.FRRConfigurationSelector) + if in.Advertisements != nil { + in, out := &in.Advertisements, &out.Advertisements + *out = make([]AdvertisementType, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteAdvertisementsSpec. +func (in *RouteAdvertisementsSpec) DeepCopy() *RouteAdvertisementsSpec { + if in == nil { + return nil + } + out := new(RouteAdvertisementsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteAdvertisementsStatus) DeepCopyInto(out *RouteAdvertisementsStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteAdvertisementsStatus. +func (in *RouteAdvertisementsStatus) DeepCopy() *RouteAdvertisementsStatus { + if in == nil { + return nil + } + out := new(RouteAdvertisementsStatus) + in.DeepCopyInto(out) + return out +} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/clusteruserdefinednetwork.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/clusteruserdefinednetwork.go new file mode 100644 index 0000000000..3a48285a6b --- /dev/null +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/clusteruserdefinednetwork.go @@ -0,0 +1,223 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterUserDefinedNetworkApplyConfiguration represents a declarative configuration of the ClusterUserDefinedNetwork type for use +// with apply. 
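+// With server-side apply, a configuration built here is handed to a typed
+// client's Apply method (sketch; the client variable and field manager name
+// are illustrative):
+//
+// cudn := ClusterUserDefinedNetwork("blue").WithLabels(map[string]string{"env": "dev"})
+// _, err := c.K8sV1().ClusterUserDefinedNetworks().Apply(ctx, cudn, metav1.ApplyOptions{FieldManager: "my-controller"})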
+type ClusterUserDefinedNetworkApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ClusterUserDefinedNetworkSpecApplyConfiguration `json:"spec,omitempty"` + Status *ClusterUserDefinedNetworkStatusApplyConfiguration `json:"status,omitempty"` +} + +// ClusterUserDefinedNetwork constructs a declarative configuration of the ClusterUserDefinedNetwork type for use with +// apply. +func ClusterUserDefinedNetwork(name string) *ClusterUserDefinedNetworkApplyConfiguration { + b := &ClusterUserDefinedNetworkApplyConfiguration{} + b.WithName(name) + b.WithKind("ClusterUserDefinedNetwork") + b.WithAPIVersion("k8s.ovn.org/v1") + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ClusterUserDefinedNetworkApplyConfiguration) WithKind(value string) *ClusterUserDefinedNetworkApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ClusterUserDefinedNetworkApplyConfiguration) WithAPIVersion(value string) *ClusterUserDefinedNetworkApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterUserDefinedNetworkApplyConfiguration) WithName(value string) *ClusterUserDefinedNetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ClusterUserDefinedNetworkApplyConfiguration) WithGenerateName(value string) *ClusterUserDefinedNetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ClusterUserDefinedNetworkApplyConfiguration) WithNamespace(value string) *ClusterUserDefinedNetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *ClusterUserDefinedNetworkApplyConfiguration) WithUID(value types.UID) *ClusterUserDefinedNetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ClusterUserDefinedNetworkApplyConfiguration) WithResourceVersion(value string) *ClusterUserDefinedNetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ClusterUserDefinedNetworkApplyConfiguration) WithGeneration(value int64) *ClusterUserDefinedNetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ClusterUserDefinedNetworkApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterUserDefinedNetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ClusterUserDefinedNetworkApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterUserDefinedNetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ClusterUserDefinedNetworkApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterUserDefinedNetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ClusterUserDefinedNetworkApplyConfiguration) WithLabels(entries map[string]string) *ClusterUserDefinedNetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ClusterUserDefinedNetworkApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterUserDefinedNetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ClusterUserDefinedNetworkApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterUserDefinedNetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ClusterUserDefinedNetworkApplyConfiguration) WithFinalizers(values ...string) *ClusterUserDefinedNetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ClusterUserDefinedNetworkApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ClusterUserDefinedNetworkApplyConfiguration) WithSpec(value *ClusterUserDefinedNetworkSpecApplyConfiguration) *ClusterUserDefinedNetworkApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *ClusterUserDefinedNetworkApplyConfiguration) WithStatus(value *ClusterUserDefinedNetworkStatusApplyConfiguration) *ClusterUserDefinedNetworkApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterUserDefinedNetworkApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/clusteruserdefinednetworkspec.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/clusteruserdefinednetworkspec.go new file mode 100644 index 0000000000..a0e7fdfc57 --- /dev/null +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/clusteruserdefinednetworkspec.go @@ -0,0 +1,51 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterUserDefinedNetworkSpecApplyConfiguration represents a declarative configuration of the ClusterUserDefinedNetworkSpec type for use +// with apply. +type ClusterUserDefinedNetworkSpecApplyConfiguration struct { + NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + Network *NetworkSpecApplyConfiguration `json:"network,omitempty"` +} + +// ClusterUserDefinedNetworkSpecApplyConfiguration constructs a declarative configuration of the ClusterUserDefinedNetworkSpec type for use with +// apply. +func ClusterUserDefinedNetworkSpec() *ClusterUserDefinedNetworkSpecApplyConfiguration { + return &ClusterUserDefinedNetworkSpecApplyConfiguration{} +} + +// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NamespaceSelector field is set to the value of the last call. +func (b *ClusterUserDefinedNetworkSpecApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *ClusterUserDefinedNetworkSpecApplyConfiguration { + b.NamespaceSelector = value + return b +} + +// WithNetwork sets the Network field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Network field is set to the value of the last call. 
+func (b *ClusterUserDefinedNetworkSpecApplyConfiguration) WithNetwork(value *NetworkSpecApplyConfiguration) *ClusterUserDefinedNetworkSpecApplyConfiguration { + b.Network = value + return b +} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/clusteruserdefinednetworkstatus.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/clusteruserdefinednetworkstatus.go new file mode 100644 index 0000000000..470c0307df --- /dev/null +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/clusteruserdefinednetworkstatus.go @@ -0,0 +1,47 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterUserDefinedNetworkStatusApplyConfiguration represents a declarative configuration of the ClusterUserDefinedNetworkStatus type for use +// with apply. +type ClusterUserDefinedNetworkStatusApplyConfiguration struct { + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ClusterUserDefinedNetworkStatusApplyConfiguration constructs a declarative configuration of the ClusterUserDefinedNetworkStatus type for use with +// apply. +func ClusterUserDefinedNetworkStatus() *ClusterUserDefinedNetworkStatusApplyConfiguration { + return &ClusterUserDefinedNetworkStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ClusterUserDefinedNetworkStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ClusterUserDefinedNetworkStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/layer2config.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/layer2config.go index f4a6616c17..4b4e68d353 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/layer2config.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/layer2config.go @@ -21,7 +21,7 @@ import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" ) -// Layer2ConfigApplyConfiguration represents an declarative configuration of the Layer2Config type for use +// Layer2ConfigApplyConfiguration represents a declarative configuration of the Layer2Config type for use // with apply. 
type Layer2ConfigApplyConfiguration struct { Role *v1.NetworkRole `json:"role,omitempty"` @@ -31,7 +31,7 @@ type Layer2ConfigApplyConfiguration struct { IPAMLifecycle *v1.NetworkIPAMLifecycle `json:"ipamLifecycle,omitempty"` } -// Layer2ConfigApplyConfiguration constructs an declarative configuration of the Layer2Config type for use with +// Layer2ConfigApplyConfiguration constructs a declarative configuration of the Layer2Config type for use with // apply. func Layer2Config() *Layer2ConfigApplyConfiguration { return &Layer2ConfigApplyConfiguration{} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/layer3config.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/layer3config.go index 31dd56af04..b29842f784 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/layer3config.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/layer3config.go @@ -21,7 +21,7 @@ import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" ) -// Layer3ConfigApplyConfiguration represents an declarative configuration of the Layer3Config type for use +// Layer3ConfigApplyConfiguration represents a declarative configuration of the Layer3Config type for use // with apply. type Layer3ConfigApplyConfiguration struct { Role *v1.NetworkRole `json:"role,omitempty"` @@ -30,7 +30,7 @@ type Layer3ConfigApplyConfiguration struct { JoinSubnets *v1.DualStackCIDRs `json:"joinSubnets,omitempty"` } -// Layer3ConfigApplyConfiguration constructs an declarative configuration of the Layer3Config type for use with +// Layer3ConfigApplyConfiguration constructs a declarative configuration of the Layer3Config type for use with // apply. func Layer3Config() *Layer3ConfigApplyConfiguration { return &Layer3ConfigApplyConfiguration{} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/layer3subnet.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/layer3subnet.go index 95b7a88552..5994932ccc 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/layer3subnet.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/layer3subnet.go @@ -21,14 +21,14 @@ import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" ) -// Layer3SubnetApplyConfiguration represents an declarative configuration of the Layer3Subnet type for use +// Layer3SubnetApplyConfiguration represents a declarative configuration of the Layer3Subnet type for use // with apply. type Layer3SubnetApplyConfiguration struct { CIDR *v1.CIDR `json:"cidr,omitempty"` HostSubnet *int32 `json:"hostSubnet,omitempty"` } -// Layer3SubnetApplyConfiguration constructs an declarative configuration of the Layer3Subnet type for use with +// Layer3SubnetApplyConfiguration constructs a declarative configuration of the Layer3Subnet type for use with // apply. 
func Layer3Subnet() *Layer3SubnetApplyConfiguration { return &Layer3SubnetApplyConfiguration{} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/networkspec.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/networkspec.go new file mode 100644 index 0000000000..120e3a39ea --- /dev/null +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/networkspec.go @@ -0,0 +1,60 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" +) + +// NetworkSpecApplyConfiguration represents a declarative configuration of the NetworkSpec type for use +// with apply. +type NetworkSpecApplyConfiguration struct { + Topology *v1.NetworkTopology `json:"topology,omitempty"` + Layer3 *Layer3ConfigApplyConfiguration `json:"layer3,omitempty"` + Layer2 *Layer2ConfigApplyConfiguration `json:"layer2,omitempty"` +} + +// NetworkSpecApplyConfiguration constructs a declarative configuration of the NetworkSpec type for use with +// apply. +func NetworkSpec() *NetworkSpecApplyConfiguration { + return &NetworkSpecApplyConfiguration{} +} + +// WithTopology sets the Topology field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Topology field is set to the value of the last call. +func (b *NetworkSpecApplyConfiguration) WithTopology(value v1.NetworkTopology) *NetworkSpecApplyConfiguration { + b.Topology = &value + return b +} + +// WithLayer3 sets the Layer3 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Layer3 field is set to the value of the last call. +func (b *NetworkSpecApplyConfiguration) WithLayer3(value *Layer3ConfigApplyConfiguration) *NetworkSpecApplyConfiguration { + b.Layer3 = value + return b +} + +// WithLayer2 sets the Layer2 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Layer2 field is set to the value of the last call. 
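+// For example, a Layer2 spec chains together as (sketch; the topology and
+// role literals are illustrative):
+//
+// spec := NetworkSpec().
+// WithTopology(v1.NetworkTopology("Layer2")).
+// WithLayer2(Layer2Config().WithRole(v1.NetworkRole("Primary")))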
+func (b *NetworkSpecApplyConfiguration) WithLayer2(value *Layer2ConfigApplyConfiguration) *NetworkSpecApplyConfiguration { + b.Layer2 = value + return b +} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/userdefinednetwork.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/userdefinednetwork.go index dea66f90e1..247c382c4f 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/userdefinednetwork.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/userdefinednetwork.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// UserDefinedNetworkApplyConfiguration represents an declarative configuration of the UserDefinedNetwork type for use +// UserDefinedNetworkApplyConfiguration represents a declarative configuration of the UserDefinedNetwork type for use // with apply. type UserDefinedNetworkApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -32,7 +32,7 @@ type UserDefinedNetworkApplyConfiguration struct { Status *UserDefinedNetworkStatusApplyConfiguration `json:"status,omitempty"` } -// UserDefinedNetwork constructs an declarative configuration of the UserDefinedNetwork type for use with +// UserDefinedNetwork constructs a declarative configuration of the UserDefinedNetwork type for use with // apply. func UserDefinedNetwork(name, namespace string) *UserDefinedNetworkApplyConfiguration { b := &UserDefinedNetworkApplyConfiguration{} @@ -216,3 +216,9 @@ func (b *UserDefinedNetworkApplyConfiguration) WithStatus(value *UserDefinedNetw b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *UserDefinedNetworkApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/userdefinednetworkspec.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/userdefinednetworkspec.go index ce1d039271..02bcd412eb 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/userdefinednetworkspec.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/userdefinednetworkspec.go @@ -21,7 +21,7 @@ import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" ) -// UserDefinedNetworkSpecApplyConfiguration represents an declarative configuration of the UserDefinedNetworkSpec type for use +// UserDefinedNetworkSpecApplyConfiguration represents a declarative configuration of the UserDefinedNetworkSpec type for use // with apply. type UserDefinedNetworkSpecApplyConfiguration struct { Topology *v1.NetworkTopology `json:"topology,omitempty"` @@ -29,7 +29,7 @@ type UserDefinedNetworkSpecApplyConfiguration struct { Layer2 *Layer2ConfigApplyConfiguration `json:"layer2,omitempty"` } -// UserDefinedNetworkSpecApplyConfiguration constructs an declarative configuration of the UserDefinedNetworkSpec type for use with +// UserDefinedNetworkSpecApplyConfiguration constructs a declarative configuration of the UserDefinedNetworkSpec type for use with // apply. 
func UserDefinedNetworkSpec() *UserDefinedNetworkSpecApplyConfiguration { return &UserDefinedNetworkSpecApplyConfiguration{} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/userdefinednetworkstatus.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/userdefinednetworkstatus.go index b573076151..44b233ff13 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/userdefinednetworkstatus.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1/userdefinednetworkstatus.go @@ -18,16 +18,16 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// UserDefinedNetworkStatusApplyConfiguration represents an declarative configuration of the UserDefinedNetworkStatus type for use +// UserDefinedNetworkStatusApplyConfiguration represents a declarative configuration of the UserDefinedNetworkStatus type for use // with apply. type UserDefinedNetworkStatusApplyConfiguration struct { - Conditions []v1.Condition `json:"conditions,omitempty"` + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// UserDefinedNetworkStatusApplyConfiguration constructs an declarative configuration of the UserDefinedNetworkStatus type for use with +// UserDefinedNetworkStatusApplyConfiguration constructs a declarative configuration of the UserDefinedNetworkStatus type for use with // apply. func UserDefinedNetworkStatus() *UserDefinedNetworkStatusApplyConfiguration { return &UserDefinedNetworkStatusApplyConfiguration{} @@ -36,9 +36,12 @@ func UserDefinedNetworkStatus() *UserDefinedNetworkStatusApplyConfiguration { // WithConditions adds the given value to the Conditions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Conditions field. 
-func (b *UserDefinedNetworkStatusApplyConfiguration) WithConditions(values ...v1.Condition) *UserDefinedNetworkStatusApplyConfiguration { +func (b *UserDefinedNetworkStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *UserDefinedNetworkStatusApplyConfiguration { for i := range values { - b.Conditions = append(b.Conditions, values[i]) + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) } return b } diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/utils.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/utils.go index 71ed7feb68..c60ceb338e 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/utils.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/utils.go @@ -19,8 +19,11 @@ package applyconfiguration import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" + internal "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/internal" userdefinednetworkv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1" + runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" ) // ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no @@ -28,12 +31,20 @@ import ( func ForKind(kind schema.GroupVersionKind) interface{} { switch kind { // Group=k8s.ovn.org, Version=v1 + case v1.SchemeGroupVersion.WithKind("ClusterUserDefinedNetwork"): + return &userdefinednetworkv1.ClusterUserDefinedNetworkApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ClusterUserDefinedNetworkSpec"): + return &userdefinednetworkv1.ClusterUserDefinedNetworkSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ClusterUserDefinedNetworkStatus"): + return &userdefinednetworkv1.ClusterUserDefinedNetworkStatusApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("Layer2Config"): return &userdefinednetworkv1.Layer2ConfigApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("Layer3Config"): return &userdefinednetworkv1.Layer3ConfigApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("Layer3Subnet"): return &userdefinednetworkv1.Layer3SubnetApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("NetworkSpec"): + return &userdefinednetworkv1.NetworkSpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("UserDefinedNetwork"): return &userdefinednetworkv1.UserDefinedNetworkApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("UserDefinedNetworkSpec"): @@ -44,3 +55,7 @@ func ForKind(kind schema.GroupVersionKind) interface{} { } return nil } + +func NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter { + return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()} +} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/fake/clientset_generated.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/fake/clientset_generated.go index 5110a3e2ac..df45c651c4 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/fake/clientset_generated.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/fake/clientset_generated.go @@ -18,6 +18,7 @@ limitations under the License. 
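With the change above, status conditions are supplied as meta/v1 apply configurations rather than concrete metav1.Condition values, and a nil entry now panics instead of being silently appended. A minimal sketch, not part of the patch; the condition type and reason strings are illustrative:

```go
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1apply "k8s.io/client-go/applyconfigurations/meta/v1"

	udnapply "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1"
)

func main() {
	// Conditions are passed as *ConditionApplyConfiguration values; the
	// builder dereferences each one, panicking on nil.
	_ = udnapply.UserDefinedNetworkStatus().
		WithConditions(metav1apply.Condition().
			WithType("NetworkReady"). // illustrative condition type
			WithStatus(metav1.ConditionTrue).
			WithReason("SyncOK"). // illustrative reason
			WithMessage("network is ready"))
}
```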
package fake import ( + applyconfiguration "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration" clientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned" k8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1" fakek8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/fake" @@ -30,8 +31,12 @@ import ( // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { @@ -73,6 +78,38 @@ func (c *Clientset) Tracker() testing.ObjectTracker { return c.tracker } +// NewClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewClientset(objects ...runtime.Object) *Clientset { + o := testing.NewFieldManagedObjectTracker( + scheme, + codecs.UniversalDecoder(), + applyconfiguration.NewTypeConverter(scheme), + ) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + var ( _ clientset.Interface = &Clientset{} _ testing.FakeClient = &Clientset{} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/clusteruserdefinednetwork.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/clusteruserdefinednetwork.go new file mode 100644 index 0000000000..ea898d732c --- /dev/null +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/clusteruserdefinednetwork.go @@ -0,0 +1,72 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
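In test code, the practical difference is that NewClientset honors field managers during server-side Apply, while the deprecated NewSimpleClientset treats Apply as a plain patch. A hedged sketch of driving the new constructor, not part of the patch; the namespace, object name, and field manager are illustrative:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	udnapply "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1"
	udnfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/fake"
)

func main() {
	// NewClientset wires in the field-managed tracker, unlike NewSimpleClientset.
	client := udnfake.NewClientset()
	udn, err := client.K8sV1().UserDefinedNetworks("default").Apply(
		context.TODO(),
		udnapply.UserDefinedNetwork("net1", "default"),
		metav1.ApplyOptions{FieldManager: "udn-test", Force: true},
	)
	fmt.Println(udn, err)
}
```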
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" + userdefinednetworkv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1" + scheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ClusterUserDefinedNetworksGetter has a method to return a ClusterUserDefinedNetworkInterface. +// A group's client should implement this interface. +type ClusterUserDefinedNetworksGetter interface { + ClusterUserDefinedNetworks() ClusterUserDefinedNetworkInterface +} + +// ClusterUserDefinedNetworkInterface has methods to work with ClusterUserDefinedNetwork resources. +type ClusterUserDefinedNetworkInterface interface { + Create(ctx context.Context, clusterUserDefinedNetwork *v1.ClusterUserDefinedNetwork, opts metav1.CreateOptions) (*v1.ClusterUserDefinedNetwork, error) + Update(ctx context.Context, clusterUserDefinedNetwork *v1.ClusterUserDefinedNetwork, opts metav1.UpdateOptions) (*v1.ClusterUserDefinedNetwork, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, clusterUserDefinedNetwork *v1.ClusterUserDefinedNetwork, opts metav1.UpdateOptions) (*v1.ClusterUserDefinedNetwork, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterUserDefinedNetwork, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterUserDefinedNetworkList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterUserDefinedNetwork, err error) + Apply(ctx context.Context, clusterUserDefinedNetwork *userdefinednetworkv1.ClusterUserDefinedNetworkApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterUserDefinedNetwork, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, clusterUserDefinedNetwork *userdefinednetworkv1.ClusterUserDefinedNetworkApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterUserDefinedNetwork, err error) + ClusterUserDefinedNetworkExpansion +} + +// clusterUserDefinedNetworks implements ClusterUserDefinedNetworkInterface +type clusterUserDefinedNetworks struct { + *gentype.ClientWithListAndApply[*v1.ClusterUserDefinedNetwork, *v1.ClusterUserDefinedNetworkList, *userdefinednetworkv1.ClusterUserDefinedNetworkApplyConfiguration] +} + +// newClusterUserDefinedNetworks returns a ClusterUserDefinedNetworks +func newClusterUserDefinedNetworks(c *K8sV1Client) *clusterUserDefinedNetworks { + return &clusterUserDefinedNetworks{ + gentype.NewClientWithListAndApply[*v1.ClusterUserDefinedNetwork, *v1.ClusterUserDefinedNetworkList, *userdefinednetworkv1.ClusterUserDefinedNetworkApplyConfiguration]( + "clusteruserdefinednetworks", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ClusterUserDefinedNetwork { return &v1.ClusterUserDefinedNetwork{} }, + func() *v1.ClusterUserDefinedNetworkList { return &v1.ClusterUserDefinedNetworkList{} }), + } +} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/fake/fake_clusteruserdefinednetwork.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/fake/fake_clusteruserdefinednetwork.go new file mode 100644 index 0000000000..949c5c3489 --- /dev/null +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/fake/fake_clusteruserdefinednetwork.go @@ -0,0 +1,185 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" + userdefinednetworkv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterUserDefinedNetworks implements ClusterUserDefinedNetworkInterface +type FakeClusterUserDefinedNetworks struct { + Fake *FakeK8sV1 +} + +var clusteruserdefinednetworksResource = v1.SchemeGroupVersion.WithResource("clusteruserdefinednetworks") + +var clusteruserdefinednetworksKind = v1.SchemeGroupVersion.WithKind("ClusterUserDefinedNetwork") + +// Get takes name of the clusterUserDefinedNetwork, and returns the corresponding clusterUserDefinedNetwork object, and an error if there is any. +func (c *FakeClusterUserDefinedNetworks) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterUserDefinedNetwork, err error) { + emptyResult := &v1.ClusterUserDefinedNetwork{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootGetActionWithOptions(clusteruserdefinednetworksResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterUserDefinedNetwork), err +} + +// List takes label and field selectors, and returns the list of ClusterUserDefinedNetworks that match those selectors. +func (c *FakeClusterUserDefinedNetworks) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterUserDefinedNetworkList, err error) { + emptyResult := &v1.ClusterUserDefinedNetworkList{} + obj, err := c.Fake. + Invokes(testing.NewRootListActionWithOptions(clusteruserdefinednetworksResource, clusteruserdefinednetworksKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.ClusterUserDefinedNetworkList{ListMeta: obj.(*v1.ClusterUserDefinedNetworkList).ListMeta} + for _, item := range obj.(*v1.ClusterUserDefinedNetworkList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterUserDefinedNetworks. +func (c *FakeClusterUserDefinedNetworks) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(clusteruserdefinednetworksResource, opts)) +} + +// Create takes the representation of a clusterUserDefinedNetwork and creates it. Returns the server's representation of the clusterUserDefinedNetwork, and an error, if there is any. +func (c *FakeClusterUserDefinedNetworks) Create(ctx context.Context, clusterUserDefinedNetwork *v1.ClusterUserDefinedNetwork, opts metav1.CreateOptions) (result *v1.ClusterUserDefinedNetwork, err error) { + emptyResult := &v1.ClusterUserDefinedNetwork{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(clusteruserdefinednetworksResource, clusterUserDefinedNetwork, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterUserDefinedNetwork), err +} + +// Update takes the representation of a clusterUserDefinedNetwork and updates it. Returns the server's representation of the clusterUserDefinedNetwork, and an error, if there is any. +func (c *FakeClusterUserDefinedNetworks) Update(ctx context.Context, clusterUserDefinedNetwork *v1.ClusterUserDefinedNetwork, opts metav1.UpdateOptions) (result *v1.ClusterUserDefinedNetwork, err error) { + emptyResult := &v1.ClusterUserDefinedNetwork{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(clusteruserdefinednetworksResource, clusterUserDefinedNetwork, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterUserDefinedNetwork), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeClusterUserDefinedNetworks) UpdateStatus(ctx context.Context, clusterUserDefinedNetwork *v1.ClusterUserDefinedNetwork, opts metav1.UpdateOptions) (result *v1.ClusterUserDefinedNetwork, err error) { + emptyResult := &v1.ClusterUserDefinedNetwork{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateSubresourceActionWithOptions(clusteruserdefinednetworksResource, "status", clusterUserDefinedNetwork, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterUserDefinedNetwork), err +} + +// Delete takes name of the clusterUserDefinedNetwork and deletes it. Returns an error if one occurs. +func (c *FakeClusterUserDefinedNetworks) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(clusteruserdefinednetworksResource, name, opts), &v1.ClusterUserDefinedNetwork{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterUserDefinedNetworks) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(clusteruserdefinednetworksResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1.ClusterUserDefinedNetworkList{}) + return err +} + +// Patch applies the patch and returns the patched clusterUserDefinedNetwork. +func (c *FakeClusterUserDefinedNetworks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterUserDefinedNetwork, err error) { + emptyResult := &v1.ClusterUserDefinedNetwork{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusteruserdefinednetworksResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterUserDefinedNetwork), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied clusterUserDefinedNetwork. +func (c *FakeClusterUserDefinedNetworks) Apply(ctx context.Context, clusterUserDefinedNetwork *userdefinednetworkv1.ClusterUserDefinedNetworkApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterUserDefinedNetwork, err error) { + if clusterUserDefinedNetwork == nil { + return nil, fmt.Errorf("clusterUserDefinedNetwork provided to Apply must not be nil") + } + data, err := json.Marshal(clusterUserDefinedNetwork) + if err != nil { + return nil, err + } + name := clusterUserDefinedNetwork.Name + if name == nil { + return nil, fmt.Errorf("clusterUserDefinedNetwork.Name must be provided to Apply") + } + emptyResult := &v1.ClusterUserDefinedNetwork{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusteruserdefinednetworksResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterUserDefinedNetwork), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+func (c *FakeClusterUserDefinedNetworks) ApplyStatus(ctx context.Context, clusterUserDefinedNetwork *userdefinednetworkv1.ClusterUserDefinedNetworkApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterUserDefinedNetwork, err error) { + if clusterUserDefinedNetwork == nil { + return nil, fmt.Errorf("clusterUserDefinedNetwork provided to Apply must not be nil") + } + data, err := json.Marshal(clusterUserDefinedNetwork) + if err != nil { + return nil, err + } + name := clusterUserDefinedNetwork.Name + if name == nil { + return nil, fmt.Errorf("clusterUserDefinedNetwork.Name must be provided to Apply") + } + emptyResult := &v1.ClusterUserDefinedNetwork{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusteruserdefinednetworksResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterUserDefinedNetwork), err +} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/fake/fake_userdefinednetwork.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/fake/fake_userdefinednetwork.go index 8efd74af04..66274f454c 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/fake/fake_userdefinednetwork.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/fake/fake_userdefinednetwork.go @@ -43,22 +43,24 @@ var userdefinednetworksKind = v1.SchemeGroupVersion.WithKind("UserDefinedNetwork // Get takes name of the userDefinedNetwork, and returns the corresponding userDefinedNetwork object, and an error if there is any. func (c *FakeUserDefinedNetworks) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.UserDefinedNetwork, err error) { + emptyResult := &v1.UserDefinedNetwork{} obj, err := c.Fake. - Invokes(testing.NewGetAction(userdefinednetworksResource, c.ns, name), &v1.UserDefinedNetwork{}) + Invokes(testing.NewGetActionWithOptions(userdefinednetworksResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.UserDefinedNetwork), err } // List takes label and field selectors, and returns the list of UserDefinedNetworks that match those selectors. func (c *FakeUserDefinedNetworks) List(ctx context.Context, opts metav1.ListOptions) (result *v1.UserDefinedNetworkList, err error) { + emptyResult := &v1.UserDefinedNetworkList{} obj, err := c.Fake. - Invokes(testing.NewListAction(userdefinednetworksResource, userdefinednetworksKind, c.ns, opts), &v1.UserDefinedNetworkList{}) + Invokes(testing.NewListActionWithOptions(userdefinednetworksResource, userdefinednetworksKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -77,40 +79,43 @@ func (c *FakeUserDefinedNetworks) List(ctx context.Context, opts metav1.ListOpti // Watch returns a watch.Interface that watches the requested userDefinedNetworks. func (c *FakeUserDefinedNetworks) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(userdefinednetworksResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(userdefinednetworksResource, c.ns, opts)) } // Create takes the representation of a userDefinedNetwork and creates it. 
Returns the server's representation of the userDefinedNetwork, and an error, if there is any. func (c *FakeUserDefinedNetworks) Create(ctx context.Context, userDefinedNetwork *v1.UserDefinedNetwork, opts metav1.CreateOptions) (result *v1.UserDefinedNetwork, err error) { + emptyResult := &v1.UserDefinedNetwork{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(userdefinednetworksResource, c.ns, userDefinedNetwork), &v1.UserDefinedNetwork{}) + Invokes(testing.NewCreateActionWithOptions(userdefinednetworksResource, c.ns, userDefinedNetwork, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.UserDefinedNetwork), err } // Update takes the representation of a userDefinedNetwork and updates it. Returns the server's representation of the userDefinedNetwork, and an error, if there is any. func (c *FakeUserDefinedNetworks) Update(ctx context.Context, userDefinedNetwork *v1.UserDefinedNetwork, opts metav1.UpdateOptions) (result *v1.UserDefinedNetwork, err error) { + emptyResult := &v1.UserDefinedNetwork{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(userdefinednetworksResource, c.ns, userDefinedNetwork), &v1.UserDefinedNetwork{}) + Invokes(testing.NewUpdateActionWithOptions(userdefinednetworksResource, c.ns, userDefinedNetwork, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.UserDefinedNetwork), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeUserDefinedNetworks) UpdateStatus(ctx context.Context, userDefinedNetwork *v1.UserDefinedNetwork, opts metav1.UpdateOptions) (*v1.UserDefinedNetwork, error) { +func (c *FakeUserDefinedNetworks) UpdateStatus(ctx context.Context, userDefinedNetwork *v1.UserDefinedNetwork, opts metav1.UpdateOptions) (result *v1.UserDefinedNetwork, err error) { + emptyResult := &v1.UserDefinedNetwork{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(userdefinednetworksResource, "status", c.ns, userDefinedNetwork), &v1.UserDefinedNetwork{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(userdefinednetworksResource, "status", c.ns, userDefinedNetwork, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.UserDefinedNetwork), err } @@ -125,7 +130,7 @@ func (c *FakeUserDefinedNetworks) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeUserDefinedNetworks) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(userdefinednetworksResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(userdefinednetworksResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.UserDefinedNetworkList{}) return err @@ -133,11 +138,12 @@ func (c *FakeUserDefinedNetworks) DeleteCollection(ctx context.Context, opts met // Patch applies the patch and returns the patched userDefinedNetwork. func (c *FakeUserDefinedNetworks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.UserDefinedNetwork, err error) { + emptyResult := &v1.UserDefinedNetwork{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(userdefinednetworksResource, c.ns, name, pt, data, subresources...), &v1.UserDefinedNetwork{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(userdefinednetworksResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.UserDefinedNetwork), err } @@ -155,11 +161,12 @@ func (c *FakeUserDefinedNetworks) Apply(ctx context.Context, userDefinedNetwork if name == nil { return nil, fmt.Errorf("userDefinedNetwork.Name must be provided to Apply") } + emptyResult := &v1.UserDefinedNetwork{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(userdefinednetworksResource, c.ns, *name, types.ApplyPatchType, data), &v1.UserDefinedNetwork{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(userdefinednetworksResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.UserDefinedNetwork), err } @@ -178,11 +185,12 @@ func (c *FakeUserDefinedNetworks) ApplyStatus(ctx context.Context, userDefinedNe if name == nil { return nil, fmt.Errorf("userDefinedNetwork.Name must be provided to Apply") } + emptyResult := &v1.UserDefinedNetwork{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(userdefinednetworksResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.UserDefinedNetwork{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(userdefinednetworksResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.UserDefinedNetwork), err } diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/fake/fake_userdefinednetwork_client.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/fake/fake_userdefinednetwork_client.go index 448f82a78d..7205afaafa 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/fake/fake_userdefinednetwork_client.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/fake/fake_userdefinednetwork_client.go @@ -27,6 +27,10 @@ type FakeK8sV1 struct { *testing.Fake } +func (c *FakeK8sV1) ClusterUserDefinedNetworks() v1.ClusterUserDefinedNetworkInterface { + return &FakeClusterUserDefinedNetworks{c} +} + func (c *FakeK8sV1) UserDefinedNetworks(namespace string) v1.UserDefinedNetworkInterface { return &FakeUserDefinedNetworks{c, namespace} } diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/generated_expansion.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/generated_expansion.go index 6df53d07d5..6f35e584c5 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/generated_expansion.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/generated_expansion.go @@ -17,4 +17,6 @@ limitations under the License. 
package v1 +type ClusterUserDefinedNetworkExpansion interface{} + type UserDefinedNetworkExpansion interface{} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/userdefinednetwork.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/userdefinednetwork.go index 96adc4a33e..81f8bcc0ca 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/userdefinednetwork.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/userdefinednetwork.go @@ -19,9 +19,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" userdefinednetworkv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/applyconfiguration/userdefinednetwork/v1" @@ -29,7 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // UserDefinedNetworksGetter has a method to return a UserDefinedNetworkInterface. @@ -42,6 +39,7 @@ type UserDefinedNetworksGetter interface { type UserDefinedNetworkInterface interface { Create(ctx context.Context, userDefinedNetwork *v1.UserDefinedNetwork, opts metav1.CreateOptions) (*v1.UserDefinedNetwork, error) Update(ctx context.Context, userDefinedNetwork *v1.UserDefinedNetwork, opts metav1.UpdateOptions) (*v1.UserDefinedNetwork, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, userDefinedNetwork *v1.UserDefinedNetwork, opts metav1.UpdateOptions) (*v1.UserDefinedNetwork, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -50,206 +48,25 @@ type UserDefinedNetworkInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.UserDefinedNetwork, err error) Apply(ctx context.Context, userDefinedNetwork *userdefinednetworkv1.UserDefinedNetworkApplyConfiguration, opts metav1.ApplyOptions) (result *v1.UserDefinedNetwork, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, userDefinedNetwork *userdefinednetworkv1.UserDefinedNetworkApplyConfiguration, opts metav1.ApplyOptions) (result *v1.UserDefinedNetwork, err error) UserDefinedNetworkExpansion } // userDefinedNetworks implements UserDefinedNetworkInterface type userDefinedNetworks struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.UserDefinedNetwork, *v1.UserDefinedNetworkList, *userdefinednetworkv1.UserDefinedNetworkApplyConfiguration] } // newUserDefinedNetworks returns a UserDefinedNetworks func newUserDefinedNetworks(c *K8sV1Client, namespace string) *userDefinedNetworks { return &userDefinedNetworks{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.UserDefinedNetwork, *v1.UserDefinedNetworkList, *userdefinednetworkv1.UserDefinedNetworkApplyConfiguration]( + "userdefinednetworks", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.UserDefinedNetwork { return &v1.UserDefinedNetwork{} }, + func() *v1.UserDefinedNetworkList { return &v1.UserDefinedNetworkList{} }), } } - -// Get takes name of the userDefinedNetwork, and returns the corresponding userDefinedNetwork object, and an error if there is any. -func (c *userDefinedNetworks) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.UserDefinedNetwork, err error) { - result = &v1.UserDefinedNetwork{} - err = c.client.Get(). - Namespace(c.ns). - Resource("userdefinednetworks"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of UserDefinedNetworks that match those selectors. -func (c *userDefinedNetworks) List(ctx context.Context, opts metav1.ListOptions) (result *v1.UserDefinedNetworkList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.UserDefinedNetworkList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("userdefinednetworks"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested userDefinedNetworks. -func (c *userDefinedNetworks) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("userdefinednetworks"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a userDefinedNetwork and creates it. Returns the server's representation of the userDefinedNetwork, and an error, if there is any. -func (c *userDefinedNetworks) Create(ctx context.Context, userDefinedNetwork *v1.UserDefinedNetwork, opts metav1.CreateOptions) (result *v1.UserDefinedNetwork, err error) { - result = &v1.UserDefinedNetwork{} - err = c.client.Post(). - Namespace(c.ns). - Resource("userdefinednetworks"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(userDefinedNetwork). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a userDefinedNetwork and updates it. Returns the server's representation of the userDefinedNetwork, and an error, if there is any. 
-func (c *userDefinedNetworks) Update(ctx context.Context, userDefinedNetwork *v1.UserDefinedNetwork, opts metav1.UpdateOptions) (result *v1.UserDefinedNetwork, err error) { - result = &v1.UserDefinedNetwork{} - err = c.client.Put(). - Namespace(c.ns). - Resource("userdefinednetworks"). - Name(userDefinedNetwork.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(userDefinedNetwork). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *userDefinedNetworks) UpdateStatus(ctx context.Context, userDefinedNetwork *v1.UserDefinedNetwork, opts metav1.UpdateOptions) (result *v1.UserDefinedNetwork, err error) { - result = &v1.UserDefinedNetwork{} - err = c.client.Put(). - Namespace(c.ns). - Resource("userdefinednetworks"). - Name(userDefinedNetwork.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(userDefinedNetwork). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the userDefinedNetwork and deletes it. Returns an error if one occurs. -func (c *userDefinedNetworks) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("userdefinednetworks"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *userDefinedNetworks) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("userdefinednetworks"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched userDefinedNetwork. -func (c *userDefinedNetworks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.UserDefinedNetwork, err error) { - result = &v1.UserDefinedNetwork{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("userdefinednetworks"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied userDefinedNetwork. -func (c *userDefinedNetworks) Apply(ctx context.Context, userDefinedNetwork *userdefinednetworkv1.UserDefinedNetworkApplyConfiguration, opts metav1.ApplyOptions) (result *v1.UserDefinedNetwork, err error) { - if userDefinedNetwork == nil { - return nil, fmt.Errorf("userDefinedNetwork provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(userDefinedNetwork) - if err != nil { - return nil, err - } - name := userDefinedNetwork.Name - if name == nil { - return nil, fmt.Errorf("userDefinedNetwork.Name must be provided to Apply") - } - result = &v1.UserDefinedNetwork{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("userdefinednetworks"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. 
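The hand-written REST plumbing removed here is now provided by the generic k8s.io/client-go/gentype client embedded above, so callers see no change in the typed client's surface. A brief sketch under that assumption, not part of the patch; the namespace and object name are illustrative:

```go
package udnexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	udnclient "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned"
)

func getUDN(ctx context.Context, c udnclient.Interface) error {
	// Same call as before the refactor; Get/List/Update/Apply now resolve
	// to gentype.ClientWithListAndApply instead of the removed methods.
	_, err := c.K8sV1().UserDefinedNetworks("default").Get(ctx, "net1", metav1.GetOptions{})
	return err
}
```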
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *userDefinedNetworks) ApplyStatus(ctx context.Context, userDefinedNetwork *userdefinednetworkv1.UserDefinedNetworkApplyConfiguration, opts metav1.ApplyOptions) (result *v1.UserDefinedNetwork, err error) { - if userDefinedNetwork == nil { - return nil, fmt.Errorf("userDefinedNetwork provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(userDefinedNetwork) - if err != nil { - return nil, err - } - - name := userDefinedNetwork.Name - if name == nil { - return nil, fmt.Errorf("userDefinedNetwork.Name must be provided to Apply") - } - - result = &v1.UserDefinedNetwork{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("userdefinednetworks"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/userdefinednetwork_client.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/userdefinednetwork_client.go index 09466e6342..76d66f6716 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/userdefinednetwork_client.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/typed/userdefinednetwork/v1/userdefinednetwork_client.go @@ -27,6 +27,7 @@ import ( type K8sV1Interface interface { RESTClient() rest.Interface + ClusterUserDefinedNetworksGetter UserDefinedNetworksGetter } @@ -35,6 +36,10 @@ type K8sV1Client struct { restClient rest.Interface } +func (c *K8sV1Client) ClusterUserDefinedNetworks() ClusterUserDefinedNetworkInterface { + return newClusterUserDefinedNetworks(c) +} + func (c *K8sV1Client) UserDefinedNetworks(namespace string) UserDefinedNetworkInterface { return newUserDefinedNetworks(c, namespace) } diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go index badac5ffab..94ae5f9ba3 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go @@ -227,6 +227,7 @@ type SharedInformerFactory interface { // Start initializes all requested informers. They are handled in goroutines // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. Start(stopCh <-chan struct{}) // Shutdown marks a factory as shutting down. 
At that point no new diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/generic.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/generic.go index 75fd59278a..6c32f3836d 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/generic.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/generic.go @@ -52,6 +52,8 @@ func (f *genericInformer) Lister() cache.GenericLister { func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { // Group=k8s.ovn.org, Version=v1 + case v1.SchemeGroupVersion.WithResource("clusteruserdefinednetworks"): + return &genericInformer{resource: resource.GroupResource(), informer: f.K8s().V1().ClusterUserDefinedNetworks().Informer()}, nil case v1.SchemeGroupVersion.WithResource("userdefinednetworks"): return &genericInformer{resource: resource.GroupResource(), informer: f.K8s().V1().UserDefinedNetworks().Informer()}, nil diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/userdefinednetwork/v1/clusteruserdefinednetwork.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/userdefinednetwork/v1/clusteruserdefinednetwork.go new file mode 100644 index 0000000000..36e30305d0 --- /dev/null +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/userdefinednetwork/v1/clusteruserdefinednetwork.go @@ -0,0 +1,88 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + userdefinednetworkv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" + versioned "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned" + internalinterfaces "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/internalinterfaces" + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterUserDefinedNetworkInformer provides access to a shared informer and lister for +// ClusterUserDefinedNetworks. +type ClusterUserDefinedNetworkInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ClusterUserDefinedNetworkLister +} + +type clusterUserDefinedNetworkInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterUserDefinedNetworkInformer constructs a new informer for ClusterUserDefinedNetwork type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewClusterUserDefinedNetworkInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterUserDefinedNetworkInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterUserDefinedNetworkInformer constructs a new informer for ClusterUserDefinedNetwork type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterUserDefinedNetworkInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.K8sV1().ClusterUserDefinedNetworks().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.K8sV1().ClusterUserDefinedNetworks().Watch(context.TODO(), options) + }, + }, + &userdefinednetworkv1.ClusterUserDefinedNetwork{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterUserDefinedNetworkInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterUserDefinedNetworkInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterUserDefinedNetworkInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&userdefinednetworkv1.ClusterUserDefinedNetwork{}, f.defaultInformer) +} + +func (f *clusterUserDefinedNetworkInformer) Lister() v1.ClusterUserDefinedNetworkLister { + return v1.NewClusterUserDefinedNetworkLister(f.Informer().GetIndexer()) +} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/userdefinednetwork/v1/interface.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/userdefinednetwork/v1/interface.go index ea0fd4bedc..4a32d8fb30 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/userdefinednetwork/v1/interface.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/userdefinednetwork/v1/interface.go @@ -23,6 +23,8 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // ClusterUserDefinedNetworks returns a ClusterUserDefinedNetworkInformer. + ClusterUserDefinedNetworks() ClusterUserDefinedNetworkInformer // UserDefinedNetworks returns a UserDefinedNetworkInformer. UserDefinedNetworks() UserDefinedNetworkInformer } @@ -38,6 +40,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// ClusterUserDefinedNetworks returns a ClusterUserDefinedNetworkInformer. +func (v *version) ClusterUserDefinedNetworks() ClusterUserDefinedNetworkInformer { + return &clusterUserDefinedNetworkInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // UserDefinedNetworks returns a UserDefinedNetworkInformer. 
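The generated informer above wires a standard ListWatch to a shared index informer. A minimal sketch of consuming it through the factory, not part of the patch; the resync period is illustrative, and note the non-blocking Start warning added to the factory interface earlier in this diff:

```go
package udnexample

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"

	udnclient "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned"
	udninformers "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions"
)

func run(client udnclient.Interface, stopCh <-chan struct{}) {
	factory := udninformers.NewSharedInformerFactory(client, 30*time.Second)
	cudnInformer := factory.K8s().V1().ClusterUserDefinedNetworks()
	cudnInformer.Informer() // register with the factory before Start

	factory.Start(stopCh) // non-blocking; pair with WaitForCacheSync
	factory.WaitForCacheSync(stopCh)

	cudns, err := cudnInformer.Lister().List(labels.Everything())
	if err == nil {
		fmt.Println(len(cudns))
	}
}
```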
func (v *version) UserDefinedNetworks() UserDefinedNetworkInformer { return &userDefinedNetworkInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1/clusteruserdefinednetwork.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1/clusteruserdefinednetwork.go new file mode 100644 index 0000000000..b764061e1e --- /dev/null +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1/clusteruserdefinednetwork.go @@ -0,0 +1,47 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// ClusterUserDefinedNetworkLister helps list ClusterUserDefinedNetworks. +// All objects returned here must be treated as read-only. +type ClusterUserDefinedNetworkLister interface { + // List lists all ClusterUserDefinedNetworks in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ClusterUserDefinedNetwork, err error) + // Get retrieves the ClusterUserDefinedNetwork from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ClusterUserDefinedNetwork, error) + ClusterUserDefinedNetworkListerExpansion +} + +// clusterUserDefinedNetworkLister implements the ClusterUserDefinedNetworkLister interface. +type clusterUserDefinedNetworkLister struct { + listers.ResourceIndexer[*v1.ClusterUserDefinedNetwork] +} + +// NewClusterUserDefinedNetworkLister returns a new ClusterUserDefinedNetworkLister. +func NewClusterUserDefinedNetworkLister(indexer cache.Indexer) ClusterUserDefinedNetworkLister { + return &clusterUserDefinedNetworkLister{listers.New[*v1.ClusterUserDefinedNetwork](indexer, v1.Resource("clusteruserdefinednetwork"))} +} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1/expansion_generated.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1/expansion_generated.go index 28d2fe9506..f70a2faf1a 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1/expansion_generated.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1/expansion_generated.go @@ -17,6 +17,10 @@ limitations under the License. package v1 +// ClusterUserDefinedNetworkListerExpansion allows custom methods to be added to +// ClusterUserDefinedNetworkLister. +type ClusterUserDefinedNetworkListerExpansion interface{} + // UserDefinedNetworkListerExpansion allows custom methods to be added to // UserDefinedNetworkLister. 
type UserDefinedNetworkListerExpansion interface{} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1/userdefinednetwork.go b/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1/userdefinednetwork.go index d5df56c6d6..48f858a1b3 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1/userdefinednetwork.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1/userdefinednetwork.go @@ -19,8 +19,8 @@ package v1 import ( v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -37,25 +37,17 @@ type UserDefinedNetworkLister interface { // userDefinedNetworkLister implements the UserDefinedNetworkLister interface. type userDefinedNetworkLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.UserDefinedNetwork] } // NewUserDefinedNetworkLister returns a new UserDefinedNetworkLister. func NewUserDefinedNetworkLister(indexer cache.Indexer) UserDefinedNetworkLister { - return &userDefinedNetworkLister{indexer: indexer} -} - -// List lists all UserDefinedNetworks in the indexer. -func (s *userDefinedNetworkLister) List(selector labels.Selector) (ret []*v1.UserDefinedNetwork, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.UserDefinedNetwork)) - }) - return ret, err + return &userDefinedNetworkLister{listers.New[*v1.UserDefinedNetwork](indexer, v1.Resource("userdefinednetwork"))} } // UserDefinedNetworks returns an object that can list and get UserDefinedNetworks. func (s *userDefinedNetworkLister) UserDefinedNetworks(namespace string) UserDefinedNetworkNamespaceLister { - return userDefinedNetworkNamespaceLister{indexer: s.indexer, namespace: namespace} + return userDefinedNetworkNamespaceLister{listers.NewNamespaced[*v1.UserDefinedNetwork](s.ResourceIndexer, namespace)} } // UserDefinedNetworkNamespaceLister helps list and get UserDefinedNetworks. @@ -73,26 +65,5 @@ type UserDefinedNetworkNamespaceLister interface { // userDefinedNetworkNamespaceLister implements the UserDefinedNetworkNamespaceLister // interface. type userDefinedNetworkNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all UserDefinedNetworks in the indexer for a given namespace. -func (s userDefinedNetworkNamespaceLister) List(selector labels.Selector) (ret []*v1.UserDefinedNetwork, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.UserDefinedNetwork)) - }) - return ret, err -} - -// Get retrieves the UserDefinedNetwork from the indexer for a given namespace and name. 
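Both listers now delegate to the generic listers.ResourceIndexer helpers, so lookup behavior, including NotFound errors, is unchanged while the hand-written List/Get bodies go away. A brief sketch, not part of the patch; the namespace and object names are illustrative:

```go
package udnexample

import (
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"

	udnlisters "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1"
)

func listExamples(udnIndexer, cudnIndexer cache.Indexer) error {
	udnLister := udnlisters.NewUserDefinedNetworkLister(udnIndexer)
	cudnLister := udnlisters.NewClusterUserDefinedNetworkLister(cudnIndexer)

	// Namespaced lookups behave as before; the generic helpers now produce
	// the NotFound errors the removed code built by hand.
	if _, err := udnLister.UserDefinedNetworks("default").List(labels.Everything()); err != nil {
		return err
	}
	_, err := cudnLister.Get("cluster-net") // hypothetical object name
	return err
}
```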
-func (s userDefinedNetworkNamespaceLister) Get(name string) (*v1.UserDefinedNetwork, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("userdefinednetwork"), name) - } - return obj.(*v1.UserDefinedNetwork), nil + listers.ResourceIndexer[*v1.UserDefinedNetwork] } diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/cudn.go b/go-controller/pkg/crd/userdefinednetwork/v1/cudn.go new file mode 100644 index 0000000000..b9a34f6a24 --- /dev/null +++ b/go-controller/pkg/crd/userdefinednetwork/v1/cudn.go @@ -0,0 +1,77 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// ClusterUserDefinedNetwork describes a network request for a shared network across namespaces. + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:path=clusteruserdefinednetworks,scope=Cluster +// +kubebuilder:singular=clusteruserdefinednetwork +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +type ClusterUserDefinedNetwork struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:Required + // +required + Spec ClusterUserDefinedNetworkSpec `json:"spec"` + // +optional + Status ClusterUserDefinedNetworkStatus `json:"status,omitempty"` +} + +// ClusterUserDefinedNetworkSpec defines the desired state of ClusterUserDefinedNetwork. +type ClusterUserDefinedNetworkSpec struct { + // NamespaceSelector is a label selector for the namespaces in which the network should be available. + // +kubebuilder:validation:Required + // +required + NamespaceSelector metav1.LabelSelector `json:"namespaceSelector"` + + // Network is the user-defined-network spec. + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Network spec is immutable" + // +required + Network NetworkSpec `json:"network"` +} + +// NetworkSpec defines the desired state of the network. +// +union +type NetworkSpec struct { + // Topology describes network configuration. + // + // Allowed values are "Layer3", "Layer2". + // Layer3 topology creates a layer 2 segment per node, each with a different subnet. Layer 3 routing is used to interconnect node subnets. + // Layer2 topology creates one logical switch shared by all nodes. + // + // +kubebuilder:validation:Required + // +required + // +unionDiscriminator + Topology NetworkTopology `json:"topology"` + + // Layer3 is the Layer3 topology configuration. + // +optional + Layer3 *Layer3Config `json:"layer3,omitempty"` + + // Layer2 is the Layer2 topology configuration. + // +optional + Layer2 *Layer2Config `json:"layer2,omitempty"` +} + +// ClusterUserDefinedNetworkStatus contains the observed status of the ClusterUserDefinedNetwork. +type ClusterUserDefinedNetworkStatus struct { + // Conditions is a slice of condition objects indicating details about ClusterUserDefinedNetwork status. + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// ClusterUserDefinedNetworkList contains a list of ClusterUserDefinedNetwork.
+// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ClusterUserDefinedNetworkList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterUserDefinedNetwork `json:"items"` +} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/register.go b/go-controller/pkg/crd/userdefinednetwork/v1/register.go index 05a40fbe73..15ac8e39ad 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/register.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/register.go @@ -28,6 +28,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &UserDefinedNetwork{}, &UserDefinedNetworkList{}, + &ClusterUserDefinedNetwork{}, + &ClusterUserDefinedNetworkList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/types.go b/go-controller/pkg/crd/userdefinednetwork/v1/shared.go similarity index 74% rename from go-controller/pkg/crd/userdefinednetwork/v1/types.go rename to go-controller/pkg/crd/userdefinednetwork/v1/shared.go index d620cdf633..56092975d5 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/types.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/shared.go @@ -16,54 +16,6 @@ limitations under the License. package v1 -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// UserDefinedNetwork describe network request for a Namespace. -// -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:resource:path=userdefinednetworks,scope=Namespaced -// +kubebuilder:singular=userdefinednetwork -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status -type UserDefinedNetwork struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:Required - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Spec is immutable" - // +kubebuilder:validation:XValidation:rule="has(self.topology) && self.topology == 'Layer3' ? has(self.layer3): !has(self.layer3)", message="spec.layer3 is required when topology is Layer3 and forbidden otherwise" - // +kubebuilder:validation:XValidation:rule="has(self.topology) && self.topology == 'Layer2' ? has(self.layer2): !has(self.layer2)", message="spec.layer2 is required when topology is Layer2 and forbidden otherwise" - // +required - Spec UserDefinedNetworkSpec `json:"spec"` - // +optional - Status UserDefinedNetworkStatus `json:"status,omitempty"` -} - -// UserDefinedNetworkSpec defines the desired state of UserDefinedNetworkSpec. -// +union -type UserDefinedNetworkSpec struct { - // Topology describes network configuration. - // - // Allowed values are "Layer3", "Layer2". - // Layer3 topology creates a layer 2 segment per node, each with a different subnet. Layer 3 routing is used to interconnect node subnets. - // Layer2 topology creates one logical switch shared by all nodes. - // - // +kubebuilder:validation:Required - // +required - // +unionDiscriminator - Topology NetworkTopology `json:"topology"` - - // Layer3 is the Layer3 topology configuration. - // +optional - Layer3 *Layer3Config `json:"layer3,omitempty"` - - // Layer2 is the Layer2 topology configuration. 
- // +optional - Layer2 *Layer2Config `json:"layer2,omitempty"` -} - // +kubebuilder:validation:Enum=Layer2;Layer3 type NetworkTopology string @@ -74,6 +26,8 @@ const ( // +kubebuilder:validation:XValidation:rule="has(self.subnets) && size(self.subnets) > 0", message="Subnets is required for Layer3 topology" // +kubebuilder:validation:XValidation:rule="!has(self.joinSubnets) || has(self.role) && self.role == 'Primary'", message="JoinSubnets is only supported for Primary network" +// + TODO: This validation does not work and needs to be fixed +// + kubebuilder:validation:XValidation:rule="!has(self.subnets) || !self.subnets.exists_one(i, cidr(i.cidr).ip().family() == 6) || self.mtu >= 1280", message="MTU should be greater than or equal to 1280 when IPv6 subnet is used" type Layer3Config struct { // Role describes the network role in the pod. // @@ -89,7 +43,7 @@ type Layer3Config struct { // // MTU is optional, if not provided, the globally configured value in OVN-Kubernetes (defaults to 1400) is used for the network. // - // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Minimum=576 // +kubebuilder:validation:Maximum=65536 // +optional MTU int32 `json:"mtu,omitempty"` @@ -141,6 +95,8 @@ type Layer3Subnet struct { // +kubebuilder:validation:XValidation:rule="self.role != 'Primary' || has(self.subnets) && size(self.subnets) > 0", message="Subnets is required for Primary Layer2 topology" // +kubebuilder:validation:XValidation:rule="!has(self.joinSubnets) || has(self.role) && self.role == 'Primary'", message="JoinSubnets is only supported for Primary network" // +kubebuilder:validation:XValidation:rule="!has(self.ipamLifecycle) || has(self.subnets) && size(self.subnets) > 0", message="IPAMLifecycle is only supported when subnets are set" +// + TODO: This validation does not work and needs to be fixed +// + kubebuilder:validation:XValidation:rule="!has(self.subnets) || !self.subnets.exists_one(i, cidr(i).ip().family() == 6) || self.mtu >= 1280", message="MTU should be greater than or equal to 1280 when IPv6 subnet is used" type Layer2Config struct { // Role describes the network role in the pod. // @@ -154,7 +110,7 @@ type Layer2Config struct { // MTU is the maximum transmission unit for a network. // MTU is optional, if not provided, the globally configured value in OVN-Kubernetes (defaults to 1400) is used for the network. // - // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Minimum=576 // +kubebuilder:validation:Maximum=65536 // +optional MTU int32 `json:"mtu,omitempty"` @@ -213,17 +169,3 @@ type CIDR string // + TODO: Add the following validations when available (kube v1.31). // + kubebuilder:validation:XValidation:rule="size(self) != 2 || isCIDR(self[0]) && isCIDR(self[1]) && cidr(self[0]).ip().family() != cidr(self[1]).ip().family()", message="When 2 CIDRs are set, they must be from different IP families" type DualStackCIDRs []CIDR - -// UserDefinedNetworkList contains a list of UserDefinedNetwork. -// +kubebuilder:object:root=true -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type UserDefinedNetworkList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []UserDefinedNetwork `json:"items"` -} - -// UserDefinedNetworkStatus contains the observed status of the UserDefinedNetwork.
-type UserDefinedNetworkStatus struct { - Conditions []metav1.Condition `json:"conditions,omitempty"` -} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/spec.go b/go-controller/pkg/crd/userdefinednetwork/v1/spec.go new file mode 100644 index 0000000000..f868f56423 --- /dev/null +++ b/go-controller/pkg/crd/userdefinednetwork/v1/spec.go @@ -0,0 +1,25 @@ +package v1 + +func (s *UserDefinedNetworkSpec) GetTopology() NetworkTopology { + return s.Topology +} + +func (s *UserDefinedNetworkSpec) GetLayer3() *Layer3Config { + return s.Layer3 +} + +func (s *UserDefinedNetworkSpec) GetLayer2() *Layer2Config { + return s.Layer2 +} + +func (s *NetworkSpec) GetTopology() NetworkTopology { + return s.Topology +} + +func (s *NetworkSpec) GetLayer3() *Layer3Config { + return s.Layer3 +} + +func (s *NetworkSpec) GetLayer2() *Layer2Config { + return s.Layer2 +} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/udn.go b/go-controller/pkg/crd/userdefinednetwork/v1/udn.go new file mode 100644 index 0000000000..c5f77f199c --- /dev/null +++ b/go-controller/pkg/crd/userdefinednetwork/v1/udn.go @@ -0,0 +1,63 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// UserDefinedNetwork describes a network request for a Namespace. +// +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:path=userdefinednetworks,scope=Namespaced +// +kubebuilder:singular=userdefinednetwork +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +type UserDefinedNetwork struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Spec is immutable" + // +kubebuilder:validation:XValidation:rule="has(self.topology) && self.topology == 'Layer3' ? has(self.layer3): !has(self.layer3)", message="spec.layer3 is required when topology is Layer3 and forbidden otherwise" + // +kubebuilder:validation:XValidation:rule="has(self.topology) && self.topology == 'Layer2' ? has(self.layer2): !has(self.layer2)", message="spec.layer2 is required when topology is Layer2 and forbidden otherwise" + // +required + Spec UserDefinedNetworkSpec `json:"spec"` + // +optional + Status UserDefinedNetworkStatus `json:"status,omitempty"` +} + +// UserDefinedNetworkSpec defines the desired state of UserDefinedNetwork. +// +union +type UserDefinedNetworkSpec struct { + // Topology describes network configuration. + // + // Allowed values are "Layer3", "Layer2". + // Layer3 topology creates a layer 2 segment per node, each with a different subnet. Layer 3 routing is used to interconnect node subnets. + // Layer2 topology creates one logical switch shared by all nodes. + // + // +kubebuilder:validation:Required + // +required + // +unionDiscriminator + Topology NetworkTopology `json:"topology"` + + // Layer3 is the Layer3 topology configuration. + // +optional + Layer3 *Layer3Config `json:"layer3,omitempty"` + + // Layer2 is the Layer2 topology configuration. + // +optional + Layer2 *Layer2Config `json:"layer2,omitempty"` +} + +// UserDefinedNetworkStatus contains the observed status of the UserDefinedNetwork. +type UserDefinedNetworkStatus struct { + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// UserDefinedNetworkList contains a list of UserDefinedNetwork.
+// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type UserDefinedNetworkList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []UserDefinedNetwork `json:"items"` +} diff --git a/go-controller/pkg/crd/userdefinednetwork/v1/zz_generated.deepcopy.go b/go-controller/pkg/crd/userdefinednetwork/v1/zz_generated.deepcopy.go index ec6dc1c632..cc8b6a3341 100644 --- a/go-controller/pkg/crd/userdefinednetwork/v1/zz_generated.deepcopy.go +++ b/go-controller/pkg/crd/userdefinednetwork/v1/zz_generated.deepcopy.go @@ -25,6 +25,108 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterUserDefinedNetwork) DeepCopyInto(out *ClusterUserDefinedNetwork) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterUserDefinedNetwork. +func (in *ClusterUserDefinedNetwork) DeepCopy() *ClusterUserDefinedNetwork { + if in == nil { + return nil + } + out := new(ClusterUserDefinedNetwork) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterUserDefinedNetwork) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterUserDefinedNetworkList) DeepCopyInto(out *ClusterUserDefinedNetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterUserDefinedNetwork, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterUserDefinedNetworkList. +func (in *ClusterUserDefinedNetworkList) DeepCopy() *ClusterUserDefinedNetworkList { + if in == nil { + return nil + } + out := new(ClusterUserDefinedNetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterUserDefinedNetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterUserDefinedNetworkSpec) DeepCopyInto(out *ClusterUserDefinedNetworkSpec) { + *out = *in + in.NamespaceSelector.DeepCopyInto(&out.NamespaceSelector) + in.Network.DeepCopyInto(&out.Network) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterUserDefinedNetworkSpec. +func (in *ClusterUserDefinedNetworkSpec) DeepCopy() *ClusterUserDefinedNetworkSpec { + if in == nil { + return nil + } + out := new(ClusterUserDefinedNetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
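With the UserDefinedNetwork and ClusterUserDefinedNetwork types and their generated deepcopy functions in place, the new cluster-scoped API can be built like any other typed object. A hedged sketch follows: the object name, selector labels, and MTU are illustrative; the Role literal leans on the string-backed role type implied by the CEL rules above, and a Primary-role network would additionally need subnets.

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	udnv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1"
)

// newSharedLayer2Network returns a cluster-scoped network request that is
// made available in every namespace carrying the team=blue label.
func newSharedLayer2Network() *udnv1.ClusterUserDefinedNetwork {
	return &udnv1.ClusterUserDefinedNetwork{
		ObjectMeta: metav1.ObjectMeta{Name: "shared-net"},
		Spec: udnv1.ClusterUserDefinedNetworkSpec{
			NamespaceSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{"team": "blue"},
			},
			// Network is validated as immutable (self == oldSelf) once created.
			Network: udnv1.NetworkSpec{
				Topology: "Layer2", // enum: Layer2 or Layer3
				Layer2: &udnv1.Layer2Config{
					Role: "Secondary", // Secondary L2 networks do not require subnets
					MTU:  1400,
				},
			},
		},
	}
}
```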
+func (in *ClusterUserDefinedNetworkStatus) DeepCopyInto(out *ClusterUserDefinedNetworkStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterUserDefinedNetworkStatus. +func (in *ClusterUserDefinedNetworkStatus) DeepCopy() *ClusterUserDefinedNetworkStatus { + if in == nil { + return nil + } + out := new(ClusterUserDefinedNetworkStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in DualStackCIDRs) DeepCopyInto(out *DualStackCIDRs) { { @@ -113,6 +215,32 @@ func (in *Layer3Subnet) DeepCopy() *Layer3Subnet { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + if in.Layer3 != nil { + in, out := &in.Layer3, &out.Layer3 + *out = new(Layer3Config) + (*in).DeepCopyInto(*out) + } + if in.Layer2 != nil { + in, out := &in.Layer2, &out.Layer2 + *out = new(Layer2Config) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. +func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UserDefinedNetwork) DeepCopyInto(out *UserDefinedNetwork) { *out = *in diff --git a/go-controller/pkg/factory/factory.go b/go-controller/pkg/factory/factory.go index 0479173cc2..354bca1cc0 100644 --- a/go-controller/pkg/factory/factory.go +++ b/go-controller/pkg/factory/factory.go @@ -12,6 +12,13 @@ import ( anpinformerfactory "sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions" anpinformer "sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/apis/v1alpha1" + certificatesinformers "k8s.io/client-go/informers/certificates/v1" + + ocpnetworkapiv1alpha1 "github.com/openshift/api/network/v1alpha1" + ocpnetworkscheme "github.com/openshift/client-go/network/clientset/versioned/scheme" + ocpnetworkinformerfactory "github.com/openshift/client-go/network/informers/externalversions" + ocpnetworkinformerv1alpha1 "github.com/openshift/client-go/network/informers/externalversions/network/v1alpha1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" egressfirewallapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1" egressfirewallscheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/scheme" @@ -20,12 +27,6 @@ import ( egressfirewalllister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/listers/egressfirewall/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" - certificatesinformers "k8s.io/client-go/informers/certificates/v1" - - ocpnetworkapiv1alpha1 "github.com/openshift/api/network/v1alpha1" - ocpnetworkscheme "github.com/openshift/client-go/network/clientset/versioned/scheme" - ocpnetworkinformerfactory "github.com/openshift/client-go/network/informers/externalversions" - 
ocpnetworkinformerv1alpha1 "github.com/openshift/client-go/network/informers/externalversions/network/v1alpha1" egressipapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1" egressipscheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/scheme" @@ -74,6 +75,11 @@ import ( userdefinednetworkapiinformerfactory "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions" userdefinednetworkinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/userdefinednetwork/v1" + routeadvertisementsapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1" + routeadvertisementsscheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/scheme" + routeadvertisementsinformerfactory "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions" + routeadvertisementsinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/routeadvertisements/v1" + kapi "k8s.io/api/core/v1" discovery "k8s.io/api/discovery/v1" knet "k8s.io/api/networking/v1" @@ -113,6 +119,7 @@ type WatchFactory struct { ipamClaimsFactory ipamclaimsfactory.SharedInformerFactory nadFactory nadinformerfactory.SharedInformerFactory udnFactory userdefinednetworkapiinformerfactory.SharedInformerFactory + raFactory routeadvertisementsinformerfactory.SharedInformerFactory informers map[reflect.Type]*informer stopChan chan struct{} @@ -185,6 +192,7 @@ var ( MultiNetworkPolicyType reflect.Type = reflect.TypeOf(&mnpapi.MultiNetworkPolicy{}) IPAMClaimsType reflect.Type = reflect.TypeOf(&ipamclaimsapi.IPAMClaim{}) UserDefinedNetworkType reflect.Type = reflect.TypeOf(&userdefinednetworkapi.UserDefinedNetwork{}) + ClusterUserDefinedNetworkType reflect.Type = reflect.TypeOf(&userdefinednetworkapi.ClusterUserDefinedNetwork{}) // Resource types used in ovnk node NamespaceExGwType reflect.Type = reflect.TypeOf(&namespaceExGw{}) @@ -212,18 +220,6 @@ func NewMasterWatchFactory(ovnClientset *util.OVNMasterClientset) (*WatchFactory } } - if util.IsNetworkSegmentationSupportEnabled() { - if err := userdefinednetworkapi.AddToScheme(userdefinednetworkscheme.Scheme); err != nil { - return nil, err - } - - wf.udnFactory = userdefinednetworkapiinformerfactory.NewSharedInformerFactory(ovnClientset.UserDefinedNetworkClient, resyncInterval) - wf.informers[UserDefinedNetworkType], err = newInformer(UserDefinedNetworkType, wf.udnFactory.K8s().V1().UserDefinedNetworks().Informer()) - if err != nil { - return nil, err - } - } - return wf, nil } @@ -288,6 +284,9 @@ func NewOVNKubeControllerWatchFactory(ovnClientset *util.OVNKubeControllerClient if err := adminbasedpolicyapi.AddToScheme(adminbasedpolicyscheme.Scheme); err != nil { return nil, err } + if err := routeadvertisementsapi.AddToScheme(routeadvertisementsscheme.Scheme); err != nil { + return nil, err + } if err := nadapi.AddToScheme(nadscheme.Scheme); err != nil { return nil, err @@ -300,6 +299,9 @@ func NewOVNKubeControllerWatchFactory(ovnClientset *util.OVNKubeControllerClient if err := ipamclaimsapi.AddToScheme(ipamclaimsscheme.Scheme); err != nil { return nil, err } + if err := userdefinednetworkapi.AddToScheme(userdefinednetworkscheme.Scheme); err != nil { + return nil, err + } // For Services and Endpoints, pre-populate the shared Informer with one 
that // has a label selector excluding headless services. @@ -406,6 +408,19 @@ func NewOVNKubeControllerWatchFactory(ovnClientset *util.OVNKubeControllerClient } } + if util.IsNetworkSegmentationSupportEnabled() { + wf.udnFactory = userdefinednetworkapiinformerfactory.NewSharedInformerFactory(ovnClientset.UserDefinedNetworkClient, resyncInterval) + wf.informers[UserDefinedNetworkType], err = newInformer(UserDefinedNetworkType, wf.udnFactory.K8s().V1().UserDefinedNetworks().Informer()) + if err != nil { + return nil, err + } + + wf.informers[ClusterUserDefinedNetworkType], err = newInformer(ClusterUserDefinedNetworkType, wf.udnFactory.K8s().V1().ClusterUserDefinedNetworks().Informer()) + if err != nil { + return nil, err + } + } + if util.IsMultiNetworkPoliciesSupportEnabled() { wf.informers[MultiNetworkPolicyType], err = newInformer(MultiNetworkPolicyType, wf.mnpFactory.K8sCniCncfIo().V1beta1().MultiNetworkPolicies().Informer()) if err != nil { @@ -418,6 +433,12 @@ func NewOVNKubeControllerWatchFactory(ovnClientset *util.OVNKubeControllerClient wf.apbRouteFactory.K8s().V1().AdminPolicyBasedExternalRoutes().Informer() } + if util.IsRouteAdvertisementsEnabled() { + wf.raFactory = routeadvertisementsinformerfactory.NewSharedInformerFactory(ovnClientset.RouteAdvertisementsClient, resyncInterval) + // make sure shared informer is created for a factory, so on wf.raFactory.Start() it is initialized and caches are synced. + wf.raFactory.K8s().V1().RouteAdvertisements().Informer() + } + return wf, nil } @@ -534,6 +555,15 @@ func (wf *WatchFactory) Start() error { } } + if wf.raFactory != nil { + wf.raFactory.Start(wf.stopChan) + for oType, synced := range waitForCacheSyncWithTimeout(wf.raFactory, wf.stopChan) { + if !synced { + return fmt.Errorf("error in syncing cache for %v informer", oType) + } + } + } + return nil } @@ -575,6 +605,10 @@ func (wf *WatchFactory) Stop() { if wf.udnFactory != nil { wf.udnFactory.Shutdown() } + + if wf.raFactory != nil { + wf.raFactory.Shutdown() + } } // NewNodeWatchFactory initializes a watch factory with significantly fewer @@ -605,6 +639,9 @@ func NewNodeWatchFactory(ovnClientset *util.OVNNodeClientset, nodeName string) ( if err := nadapi.AddToScheme(nadscheme.Scheme); err != nil { return nil, err } + if err := routeadvertisementsapi.AddToScheme(routeadvertisementsscheme.Scheme); err != nil { + return nil, err + } var err error wf.informers[PodType], err = newQueuedInformer(PodType, wf.iFactory.Core().V1().Pods().Informer(), wf.stopChan, @@ -698,10 +735,16 @@ func NewNodeWatchFactory(ovnClientset *util.OVNNodeClientset, nodeName string) ( wf.apbRouteFactory.K8s().V1().AdminPolicyBasedExternalRoutes().Informer() } - // need to configure OVS interfaces for Pods on secondary networks in the DPU mode. + if util.IsRouteAdvertisementsEnabled() { + wf.raFactory = routeadvertisementsinformerfactory.NewSharedInformerFactory(ovnClientset.RouteAdvertisementsClient, resyncInterval) + // make sure shared informer is created for a factory, so on wf.raFactory.Start() it is initialized and caches are synced. + wf.raFactory.K8s().V1().RouteAdvertisements().Informer() + } + + // need to configure OVS interfaces for Pods on secondary networks in the DPU mode // need to know what is the primary network for a namespace on the CNI side, which // needs the NAD factory whenever the UDN feature is used. 
- if config.OVNKubernetesFeature.EnableMultiNetwork || config.OVNKubernetesFeature.EnableNetworkSegmentation { + if config.OVNKubernetesFeature.EnableMultiNetwork && (config.OVNKubernetesFeature.EnableNetworkSegmentation || config.OvnKubeNode.Mode == types.NodeModeDPU) { wf.nadFactory = nadinformerfactory.NewSharedInformerFactory(ovnClientset.NetworkAttchDefClient, resyncInterval) wf.informers[NetworkAttachmentDefinitionType], err = newInformer(NetworkAttachmentDefinitionType, wf.nadFactory.K8sCniCncfIo().V1().NetworkAttachmentDefinitions().Informer()) if err != nil { @@ -709,6 +752,19 @@ func NewNodeWatchFactory(ovnClientset *util.OVNNodeClientset, nodeName string) ( } } + if util.IsNetworkSegmentationSupportEnabled() { + wf.udnFactory = userdefinednetworkapiinformerfactory.NewSharedInformerFactory(ovnClientset.UserDefinedNetworkClient, resyncInterval) + wf.informers[UserDefinedNetworkType], err = newInformer(UserDefinedNetworkType, wf.udnFactory.K8s().V1().UserDefinedNetworks().Informer()) + if err != nil { + return nil, err + } + + wf.informers[ClusterUserDefinedNetworkType], err = newInformer(ClusterUserDefinedNetworkType, wf.udnFactory.K8s().V1().ClusterUserDefinedNetworks().Informer()) + if err != nil { + return nil, err + } + } + return wf, nil } @@ -751,6 +807,9 @@ func NewClusterManagerWatchFactory(ovnClientset *util.OVNClusterManagerClientset if err := userdefinednetworkapi.AddToScheme(userdefinednetworkscheme.Scheme); err != nil { return nil, err } + if err := routeadvertisementsapi.AddToScheme(routeadvertisementsscheme.Scheme); err != nil { + return nil, err + } // For Services and Endpoints, pre-populate the shared Informer with one that // has a label selector excluding headless services. @@ -854,6 +913,22 @@ func NewClusterManagerWatchFactory(ovnClientset *util.OVNClusterManagerClientset if err != nil { return nil, err } + wf.informers[ClusterUserDefinedNetworkType], err = newInformer(ClusterUserDefinedNetworkType, wf.udnFactory.K8s().V1().ClusterUserDefinedNetworks().Informer()) + if err != nil { + return nil, err + } + + // make sure namespace informer cache is initialized and synced on Start(). + wf.iFactory.Core().V1().Namespaces().Informer() + + // make sure pod informer cache is initialized and synced on Start(). + wf.iFactory.Core().V1().Pods().Informer() + } + + if util.IsRouteAdvertisementsEnabled() { + wf.raFactory = routeadvertisementsinformerfactory.NewSharedInformerFactory(ovnClientset.RouteAdvertisementsClient, resyncInterval) + // make sure shared informer is created for a factory, so on wf.raFactory.Start() it is initialized and caches are synced.
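The raFactory wiring that this comment describes repeats a client-go idiom worth spelling out: a shared informer is only registered with its factory when `Informer()` is called, so the factory must be "touched" before `Start()`, or `WaitForCacheSync()` has nothing to sync. A sketch of the same pattern in isolation, assuming the generated clientset package at `.../routeadvertisements/v1/apis/clientset/versioned` and an illustrative resync interval:

```go
package example

import (
	"fmt"
	"time"

	raclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned"
	rainformers "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions"
)

// startRouteAdvertisementsInformer registers the RouteAdvertisements informer
// with its factory, starts it, and waits for the initial cache sync.
func startRouteAdvertisementsInformer(client raclientset.Interface, stopCh <-chan struct{}) error {
	factory := rainformers.NewSharedInformerFactory(client, 10*time.Minute)
	// Calling Informer() registers the informer; without this, Start() is a no-op.
	factory.K8s().V1().RouteAdvertisements().Informer()
	factory.Start(stopCh)
	for informerType, synced := range factory.WaitForCacheSync(stopCh) {
		if !synced {
			return fmt.Errorf("error in syncing cache for %v informer", informerType)
		}
	}
	return nil
}
```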
+ wf.raFactory.K8s().V1().RouteAdvertisements().Informer() } return wf, nil @@ -1374,30 +1449,14 @@ func (wf *WatchFactory) GetEndpointSlice(namespace, name string) (*discovery.End // GetEndpointSlicesBySelector returns a list of EndpointSlices in a given namespace by the label selector func (wf *WatchFactory) GetEndpointSlicesBySelector(namespace string, labelSelector metav1.LabelSelector) ([]*discovery.EndpointSlice, error) { - selector, err := metav1.LabelSelectorAsSelector(&labelSelector) - if err != nil { - return nil, err - } - endpointSliceLister := wf.informers[EndpointSliceType].lister.(discoverylisters.EndpointSliceLister) - return endpointSliceLister.EndpointSlices(namespace).List(selector) + return util.GetEndpointSlicesBySelector(namespace, labelSelector, wf.informers[EndpointSliceType].lister.(discoverylisters.EndpointSliceLister)) } // GetServiceEndpointSlices returns the endpointSlices associated with a service for the specified network // if network is DefaultNetworkName the default endpointSlices are returned, otherwise the function looks for mirror endpointslices // for the specified network. func (wf *WatchFactory) GetServiceEndpointSlices(namespace, svcName, network string) ([]*discovery.EndpointSlice, error) { - var selector metav1.LabelSelector - if network == types.DefaultNetworkName { - selector = metav1.LabelSelector{MatchLabels: map[string]string{ - discovery.LabelServiceName: svcName, - }} - } else { - selector = metav1.LabelSelector{MatchLabels: map[string]string{ - types.LabelUserDefinedServiceName: svcName, - types.LabelUserDefinedEndpointSliceNetwork: network, - }} - } - return wf.GetEndpointSlicesBySelector(namespace, selector) + return util.GetServiceEndpointSlices(namespace, svcName, network, wf.informers[EndpointSliceType].lister.(discoverylisters.EndpointSliceLister)) } // GetNamespaces returns a list of namespaces in the cluster @@ -1541,10 +1600,18 @@ func (wf *WatchFactory) UserDefinedNetworkInformer() userdefinednetworkinformer. return wf.udnFactory.K8s().V1().UserDefinedNetworks() } +func (wf *WatchFactory) ClusterUserDefinedNetworkInformer() userdefinednetworkinformer.ClusterUserDefinedNetworkInformer { + return wf.udnFactory.K8s().V1().ClusterUserDefinedNetworks() +} + func (wf *WatchFactory) DNSNameResolverInformer() ocpnetworkinformerv1alpha1.DNSNameResolverInformer { return wf.dnsFactory.Network().V1alpha1().DNSNameResolvers() } +func (wf *WatchFactory) RouteAdvertisementsInformer() routeadvertisementsinformer.RouteAdvertisementsInformer { + return wf.raFactory.K8s().V1().RouteAdvertisements() +} + // withServiceNameAndNoHeadlessServiceSelector returns a LabelSelector (added to the // watcher for EndpointSlices) that will only choose EndpointSlices with a non-empty // "kubernetes.io/service-name" label and without "service.kubernetes.io/headless" @@ -1642,6 +1709,8 @@ type waitForCacheSyncer interface { } func waitForCacheSyncWithTimeout(factory waitForCacheSyncer, stopCh <-chan struct{}) map[reflect.Type]bool { + // Give some small time for sync. 
It helps significantly reduce unit test time + time.Sleep(5 * time.Millisecond) return factory.WaitForCacheSync(util.GetChildStopChanWithTimeout(stopCh, types.InformerSyncTimeout)) } diff --git a/go-controller/pkg/factory/factory_test.go b/go-controller/pkg/factory/factory_test.go index 861d9e72aa..6815962e06 100644 --- a/go-controller/pkg/factory/factory_test.go +++ b/go-controller/pkg/factory/factory_test.go @@ -45,7 +45,7 @@ import ( ocpconfigapi "github.com/openshift/api/config/v1" ocpcloudnetworkclientsetfake "github.com/openshift/client-go/cloudnetwork/clientset/versioned/fake" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/factory/handler.go b/go-controller/pkg/factory/handler.go index d6f5f96f18..bcc5551f0b 100644 --- a/go-controller/pkg/factory/handler.go +++ b/go-controller/pkg/factory/handler.go @@ -535,6 +535,8 @@ func newInformerLister(oType reflect.Type, sharedInformer cache.SharedIndexInfor return ipamclaimslister.NewIPAMClaimLister(sharedInformer.GetIndexer()), nil case UserDefinedNetworkType: return userdefinednetworklister.NewUserDefinedNetworkLister(sharedInformer.GetIndexer()), nil + case ClusterUserDefinedNetworkType: + return userdefinednetworklister.NewClusterUserDefinedNetworkLister(sharedInformer.GetIndexer()), nil } return nil, fmt.Errorf("cannot create lister from type %v", oType) diff --git a/go-controller/pkg/factory/mocks/NodeWatchFactory.go b/go-controller/pkg/factory/mocks/NodeWatchFactory.go index 986f722c78..11c39843de 100644 --- a/go-controller/pkg/factory/mocks/NodeWatchFactory.go +++ b/go-controller/pkg/factory/mocks/NodeWatchFactory.go @@ -20,6 +20,10 @@ import ( mock "github.com/stretchr/testify/mock" + routeadvertisementsv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/routeadvertisements/v1" + + userdefinednetworkv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/userdefinednetwork/v1" + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/adminpolicybasedroute/v1" ) @@ -198,6 +202,26 @@ func (_m *NodeWatchFactory) AddServiceHandler(handlerFuncs cache.ResourceEventHa return r0, r1 } +// ClusterUserDefinedNetworkInformer provides a mock function with given fields: +func (_m *NodeWatchFactory) ClusterUserDefinedNetworkInformer() userdefinednetworkv1.ClusterUserDefinedNetworkInformer { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ClusterUserDefinedNetworkInformer") + } + + var r0 userdefinednetworkv1.ClusterUserDefinedNetworkInformer + if rf, ok := ret.Get(0).(func() userdefinednetworkv1.ClusterUserDefinedNetworkInformer); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(userdefinednetworkv1.ClusterUserDefinedNetworkInformer) + } + } + + return r0 +} + // EgressIPInformer provides a mock function with given fields: func (_m *NodeWatchFactory) EgressIPInformer() egressipv1.EgressIPInformer { ret := _m.Called() @@ -718,6 +742,26 @@ func (_m *NodeWatchFactory) RemoveServiceHandler(handler *factory.Handler) { _m.Called(handler) } +// RouteAdvertisementsInformer provides a mock function with given fields: +func (_m *NodeWatchFactory) RouteAdvertisementsInformer() routeadvertisementsv1.RouteAdvertisementsInformer { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for RouteAdvertisementsInformer") + } + + 
var r0 routeadvertisementsv1.RouteAdvertisementsInformer + if rf, ok := ret.Get(0).(func() routeadvertisementsv1.RouteAdvertisementsInformer); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(routeadvertisementsv1.RouteAdvertisementsInformer) + } + } + + return r0 +} + // Shutdown provides a mock function with given fields: func (_m *NodeWatchFactory) Shutdown() { _m.Called() @@ -741,6 +785,26 @@ func (_m *NodeWatchFactory) Start() error { return r0 } +// UserDefinedNetworkInformer provides a mock function with given fields: +func (_m *NodeWatchFactory) UserDefinedNetworkInformer() userdefinednetworkv1.UserDefinedNetworkInformer { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for UserDefinedNetworkInformer") + } + + var r0 userdefinednetworkv1.UserDefinedNetworkInformer + if rf, ok := ret.Get(0).(func() userdefinednetworkv1.UserDefinedNetworkInformer); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(userdefinednetworkv1.UserDefinedNetworkInformer) + } + } + + return r0 +} + // NewNodeWatchFactory creates a new instance of NodeWatchFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewNodeWatchFactory(t interface { diff --git a/go-controller/pkg/factory/types.go b/go-controller/pkg/factory/types.go index c4bc924f4a..1a179e790a 100644 --- a/go-controller/pkg/factory/types.go +++ b/go-controller/pkg/factory/types.go @@ -3,6 +3,8 @@ package factory import ( adminpolicybasedrouteinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/adminpolicybasedroute/v1" egressipinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/egressip/v1" + routeadvertisementsinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions/routeadvertisements/v1" + userdefinednetworkinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/userdefinednetwork/v1" nadinformer "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1" @@ -60,6 +62,9 @@ type NodeWatchFactory interface { APBRouteInformer() adminpolicybasedrouteinformer.AdminPolicyBasedExternalRouteInformer EgressIPInformer() egressipinformer.EgressIPInformer NADInformer() nadinformer.NetworkAttachmentDefinitionInformer + UserDefinedNetworkInformer() userdefinednetworkinformer.UserDefinedNetworkInformer + ClusterUserDefinedNetworkInformer() userdefinednetworkinformer.ClusterUserDefinedNetworkInformer + RouteAdvertisementsInformer() routeadvertisementsinformer.RouteAdvertisementsInformer GetPods(namespace string) ([]*kapi.Pod, error) GetPod(namespace, name string) (*kapi.Pod, error) diff --git a/go-controller/pkg/generator/udn/masquerade_ips.go b/go-controller/pkg/generator/udn/masquerade_ips.go index 99ff461bef..5882fb809e 100644 --- a/go-controller/pkg/generator/udn/masquerade_ips.go +++ b/go-controller/pkg/generator/udn/masquerade_ips.go @@ -66,3 +66,23 @@ func allocateMasqueradeIPs(idName string, masqueradeSubnet string, networkID int } return masqueradeIPs, nil } + +// GetUDNGatewayMasqueradeIPs returns the list of gateway router masqueradeIPs for the given UDN's networkID +func GetUDNGatewayMasqueradeIPs(networkID int) ([]*net.IPNet, error) { + 
var masqIPs []*net.IPNet + if config.IPv4Mode { + v4MasqIPs, err := AllocateV4MasqueradeIPs(networkID) + if err != nil { + return nil, fmt.Errorf("failed to get v4 masquerade IP, networkID %d: %v", networkID, err) + } + masqIPs = append(masqIPs, v4MasqIPs.GatewayRouter) + } + if config.IPv6Mode { + v6MasqIPs, err := AllocateV6MasqueradeIPs(networkID) + if err != nil { + return nil, fmt.Errorf("failed to get v6 masquerade IP, networkID %d: %v", networkID, err) + } + masqIPs = append(masqIPs, v6MasqIPs.GatewayRouter) + } + return masqIPs, nil +} diff --git a/go-controller/pkg/informer/informer.go b/go-controller/pkg/informer/informer.go index 5496a37594..c3f0e25ca3 100644 --- a/go-controller/pkg/informer/informer.go +++ b/go-controller/pkg/informer/informer.go @@ -39,7 +39,7 @@ type eventHandler struct { // to have a copy of the object that needs deleting deletedIndexer cache.Indexer // workqueue is the queue we use to store work - workqueue workqueue.RateLimitingInterface + workqueue workqueue.TypedRateLimitingInterface[string] // add is the handler function that gets called when something has been added/updated add func(obj interface{}) error // delete is handler function that gets called when something has been deleted @@ -91,10 +91,13 @@ func NewDefaultEventHandler( name: name, informer: informer, deletedIndexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{}), - workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name), - add: addFunc, - delete: deleteFunc, - updateFilter: updateFilterFunc, + workqueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: name}, + ), + add: addFunc, + delete: deleteFunc, + updateFilter: updateFilterFunc, } _, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { @@ -144,10 +147,13 @@ func NewTestEventHandler( name: name, informer: informer, deletedIndexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{}), - workqueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), - add: addFunc, - delete: deleteFunc, - updateFilter: updateFilterFunc, + workqueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: name}, + ), + add: addFunc, + delete: deleteFunc, + updateFilter: updateFilterFunc, } _, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { @@ -277,7 +283,7 @@ func (e *eventHandler) runWorker() { // processNextWorkItem processes work items from the queue func (e *eventHandler) processNextWorkItem() bool { // get item from the queue - obj, shutdown := e.workqueue.Get() + key, shutdown := e.workqueue.Get() // if we have to shutdown, return now if shutdown { @@ -285,20 +291,9 @@ func (e *eventHandler) processNextWorkItem() bool { } // process the item - err := func(obj interface{}) error { + err := func(key string) error { // make sure we call Done on the object once we've finshed processing - defer e.workqueue.Done(obj) - - var key string - var ok bool - // items on the queue should always be strings - if key, ok = obj.(string); !ok { - // As the item in the workqueue is actually invalid, we call - // Forget here else we'd go into a loop of attempting to - // process a work item that is invalid. 
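The informer changes above migrate from the deprecated untyped workqueue to the generic typed API available in recent client-go releases: items come off the queue as `string` keys, which removes the runtime type assertion and its error path. A minimal consumer under those assumptions:

```go
package example

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// processOne drains a single key from a typed queue and marks it done.
func processOne(queue workqueue.TypedRateLimitingInterface[string]) bool {
	key, shutdown := queue.Get() // key is a string; no interface{} assertion needed
	if shutdown {
		return false
	}
	defer queue.Done(key)

	fmt.Println("syncing", key)
	queue.Forget(key) // success: reset any rate-limit backoff for this key
	return true
}

func run() {
	queue := workqueue.NewTypedRateLimitingQueueWithConfig(
		workqueue.DefaultTypedControllerRateLimiter[string](),
		workqueue.TypedRateLimitingQueueConfig[string]{Name: "demo"},
	)
	queue.Add("default/my-pod")
	processOne(queue)
}
```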
- e.workqueue.Forget(obj) - return fmt.Errorf("expected string in workqueue but got %#v", obj) - } + defer e.workqueue.Done(key) // Run the syncHandler, passing it the namespace/name string of the // resource to be synced. @@ -310,16 +305,16 @@ func (e *eventHandler) processNextWorkItem() bool { return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) } // if we've exceeded MaxRetries, remove the item from the queue - e.workqueue.Forget(obj) + e.workqueue.Forget(key) return fmt.Errorf("dropping %s from %s queue as it has failed more than %d times", key, e.name, MaxRetries) } // Finally, if no error occurs we Forget this item so it does not // get queued again until another change happens. - e.workqueue.Forget(obj) + e.workqueue.Forget(key) klog.Infof("Successfully synced '%s'", key) return nil - }(obj) + }(key) // handle any errors that occurred if err != nil { diff --git a/go-controller/pkg/informer/informer_test.go b/go-controller/pkg/informer/informer_test.go index d2da9746f5..8c4b20dfe0 100644 --- a/go-controller/pkg/informer/informer_test.go +++ b/go-controller/pkg/informer/informer_test.go @@ -9,7 +9,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" kapi "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -523,7 +523,7 @@ var _ = Describe("Event Handler Internals", func() { name: "test", informer: factory.Core().V1().Pods().Informer(), deletedIndexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{}), - workqueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), + workqueue: workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[string]()), add: func(obj interface{}) error { return nil }, @@ -547,7 +547,7 @@ var _ = Describe("Event Handler Internals", func() { name: "test", informer: factory.Core().V1().Pods().Informer(), deletedIndexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{}), - workqueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), + workqueue: workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[string]()), add: func(obj interface{}) error { return nil }, @@ -576,7 +576,7 @@ var _ = Describe("Event Handler Internals", func() { name: "test", informer: factory.Core().V1().Pods().Informer(), deletedIndexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{}), - workqueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), + workqueue: workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[string]()), add: func(obj interface{}) error { return nil }, diff --git a/go-controller/pkg/kube/annotator_test.go b/go-controller/pkg/kube/annotator_test.go index 8d253a3b16..229a09b4d9 100644 --- a/go-controller/pkg/kube/annotator_test.go +++ b/go-controller/pkg/kube/annotator_test.go @@ -11,7 +11,7 @@ import ( kubeMocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube/mocks" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) diff --git a/go-controller/pkg/kube/kube_suite_test.go b/go-controller/pkg/kube/kube_suite_test.go index 662f5fa1ea..0096d2b1de 100644 --- a/go-controller/pkg/kube/kube_suite_test.go +++ b/go-controller/pkg/kube/kube_suite_test.go @@ -3,7 +3,7 @@ package kube import ( "testing" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/kube/kube_test.go b/go-controller/pkg/kube/kube_test.go index 43fa939951..b27ed115b2 100644 --- a/go-controller/pkg/kube/kube_test.go +++ b/go-controller/pkg/kube/kube_test.go @@ -3,7 +3,7 @@ package kube import ( "context" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/go-controller/pkg/kubevirt/dhcp.go b/go-controller/pkg/kubevirt/dhcp.go index 266c4e2f5c..5e8534bd71 100644 --- a/go-controller/pkg/kubevirt/dhcp.go +++ b/go-controller/pkg/kubevirt/dhcp.go @@ -22,17 +22,74 @@ const ( dhcpLeaseTime = 3500 ) +type DHCPConfigsOpt = func(*dhcpConfigs) + type dhcpConfigs struct { V4 *nbdb.DHCPOptions V6 *nbdb.DHCPOptions } +func WithIPv4Router(router string) func(*dhcpConfigs) { + return func(configs *dhcpConfigs) { + if configs.V4 == nil { + return + } + configs.V4.Options["router"] = router + } +} + +func WithIPv4MTU(mtu int) func(*dhcpConfigs) { + return func(configs *dhcpConfigs) { + if configs.V4 == nil { + return + } + configs.V4.Options["mtu"] = fmt.Sprintf("%d", mtu) + } +} + +func WithIPv4DNSServer(dnsServer string) func(*dhcpConfigs) { + return func(configs *dhcpConfigs) { + if configs.V4 == nil { + return + } + configs.V4.Options["dns_server"] = dnsServer + } +} + +func WithIPv6DNSServer(dnsServer string) func(*dhcpConfigs) { + return func(configs *dhcpConfigs) { + // If there is no ipv6 dns server don't configure the option, this is + // quite common at dual stack envs since a ipv4 dns server can serve + // ipv6 AAAA records. + if dnsServer == "" { + return + } + if configs.V6 == nil { + return + } + configs.V6.Options["dns_server"] = dnsServer + } +} + func EnsureDHCPOptionsForMigratablePod(controllerName string, nbClient libovsdbclient.Client, watchFactory *factory.WatchFactory, pod *corev1.Pod, ips []*net.IPNet, lsp *nbdb.LogicalSwitchPort) error { + dnsServerIPv4, dnsServerIPv6, err := RetrieveDNSServiceClusterIPs(watchFactory) + if err != nil { + return fmt.Errorf("failed retrieving dns service cluster ip: %v", err) + } + + return EnsureDHCPOptionsForLSP(controllerName, nbClient, pod, ips, lsp, + WithIPv4Router(ARPProxyIPv4), + WithIPv4DNSServer(dnsServerIPv4), + WithIPv6DNSServer(dnsServerIPv6), + ) +} + +func EnsureDHCPOptionsForLSP(controllerName string, nbClient libovsdbclient.Client, pod *corev1.Pod, ips []*net.IPNet, lsp *nbdb.LogicalSwitchPort, opts ...DHCPConfigsOpt) error { vmKey := ExtractVMNameFromPod(pod) if vmKey == nil { return fmt.Errorf("missing vm label at pod %s/%s", pod.Namespace, pod.Name) } - dhcpConfigs, err := composeDHCPConfigs(watchFactory, controllerName, *vmKey, ips) + dhcpConfigs, err := composeDHCPConfigs(controllerName, *vmKey, ips, opts...) 
if err != nil { return fmt.Errorf("failed composing DHCP options: %v", err) } @@ -43,7 +100,7 @@ func EnsureDHCPOptionsForMigratablePod(controllerName string, nbClient libovsdbc return nil } -func composeDHCPConfigs(k8scli *factory.WatchFactory, controllerName string, vmKey ktypes.NamespacedName, podIPs []*net.IPNet) (*dhcpConfigs, error) { +func composeDHCPConfigs(controllerName string, vmKey ktypes.NamespacedName, podIPs []*net.IPNet, opts ...DHCPConfigsOpt) (*dhcpConfigs, error) { if len(podIPs) == 0 { return nil, fmt.Errorf("missing podIPs to compose dhcp options") } @@ -51,11 +108,6 @@ func composeDHCPConfigs(k8scli *factory.WatchFactory, controllerName string, vmK return nil, fmt.Errorf("missing vmName to compose dhcp options") } - dnsServerIPv4, dnsServerIPv6, err := retrieveDNSServiceClusterIPs(k8scli) - if err != nil { - return nil, fmt.Errorf("failed retrieving dns service cluster ip: %v", err) - } - dhcpConfigs := &dhcpConfigs{} for _, ip := range podIPs { _, cidr, err := net.ParseCIDR(ip.String()) @@ -63,15 +115,18 @@ func composeDHCPConfigs(k8scli *factory.WatchFactory, controllerName string, vmK return nil, fmt.Errorf("failed converting podIPs to cidr to configure dhcp: %v", err) } if utilnet.IsIPv4CIDR(cidr) { - dhcpConfigs.V4 = ComposeDHCPv4Options(cidr.String(), dnsServerIPv4, controllerName, vmKey) + dhcpConfigs.V4 = ComposeDHCPv4Options(cidr.String(), controllerName, vmKey) } else if utilnet.IsIPv6CIDR(cidr) { - dhcpConfigs.V6 = ComposeDHCPv6Options(cidr.String(), dnsServerIPv6, controllerName, vmKey) + dhcpConfigs.V6 = ComposeDHCPv6Options(cidr.String(), controllerName, vmKey) } } + for _, opt := range opts { + opt(dhcpConfigs) + } return dhcpConfigs, nil } -func retrieveDNSServiceClusterIPs(k8scli *factory.WatchFactory) (string, string, error) { +func RetrieveDNSServiceClusterIPs(k8scli *factory.WatchFactory) (string, string, error) { dnsServer, err := k8scli.GetService(config.Kubernetes.DNSServiceNamespace, config.Kubernetes.DNSServiceName) if err != nil { return "", "", err @@ -88,14 +143,12 @@ func retrieveDNSServiceClusterIPs(k8scli *factory.WatchFactory) (string, string, return clusterIPv4, clusterIPv6, nil } -func ComposeDHCPv4Options(cidr, dnsServer, controllerName string, vmKey ktypes.NamespacedName) *nbdb.DHCPOptions { +func ComposeDHCPv4Options(cidr, controllerName string, vmKey ktypes.NamespacedName) *nbdb.DHCPOptions { serverMAC := util.IPAddrToHWAddr(net.ParseIP(ARPProxyIPv4)).String() dhcpOptions := &nbdb.DHCPOptions{ Cidr: cidr, Options: map[string]string{ "lease_time": fmt.Sprintf("%d", dhcpLeaseTime), - "router": ARPProxyIPv4, - "dns_server": dnsServer, "server_id": ARPProxyIPv4, "server_mac": serverMAC, "hostname": fmt.Sprintf("%q", vmKey.Name), @@ -104,17 +157,15 @@ func ComposeDHCPv4Options(cidr, dnsServer, controllerName string, vmKey ktypes.N return composeDHCPOptions(controllerName, vmKey, dhcpOptions) } -func ComposeDHCPv6Options(cidr, dnsServer, controllerName string, vmKey ktypes.NamespacedName) *nbdb.DHCPOptions { +func ComposeDHCPv6Options(cidr, controllerName string, vmKey ktypes.NamespacedName) *nbdb.DHCPOptions { serverMAC := util.IPAddrToHWAddr(net.ParseIP(ARPProxyIPv6)).String() dhcpOptions := &nbdb.DHCPOptions{ Cidr: cidr, Options: map[string]string{ "server_id": serverMAC, + "fqdn": fmt.Sprintf("%q", vmKey.Name), // equivalent to ipv4 "hostname" option }, } - if dnsServer != "" { - dhcpOptions.Options["dns_server"] = dnsServer - } return composeDHCPOptions(controllerName, vmKey, dhcpOptions) } diff --git 
a/go-controller/pkg/kubevirt/dhcp_test.go b/go-controller/pkg/kubevirt/dhcp_test.go index c94e014cc2..5d4a99ecc3 100644 --- a/go-controller/pkg/kubevirt/dhcp_test.go +++ b/go-controller/pkg/kubevirt/dhcp_test.go @@ -3,39 +3,23 @@ package kubevirt import ( "net" - . "github.com/onsi/ginkgo" - . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ktypes "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/fake" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" ) var _ = Describe("Kubevirt", func() { type dhcpTest struct { cidrs []string controllerName, namespace, vmName string - dns *corev1.Service + opts []DHCPConfigsOpt expectedDHCPConfigs dhcpConfigs expectedError string } var ( - svc = func(namespace string, name string, clusterIPs []string) *corev1.Service { - return &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "kube-system", - Name: "kube-dns", - }, - Spec: corev1.ServiceSpec{ - ClusterIPs: clusterIPs, - }, - } - } key = func(namespace, name string) ktypes.NamespacedName { return ktypes.NamespacedName{Namespace: namespace, Name: name} } @@ -46,101 +30,181 @@ var _ = Describe("Kubevirt", func() { } ) DescribeTable("composing dhcp options should success", func(t dhcpTest) { - svcs := []corev1.Service{} - if t.dns != nil { - svcs = append(svcs, *t.dns) - } - fakeClient := &util.OVNMasterClientset{ - KubeClient: fake.NewSimpleClientset(&corev1.ServiceList{ - Items: svcs, - }), - } - watcher, err := factory.NewMasterWatchFactory(fakeClient) - Expect(err).NotTo(HaveOccurred()) - Expect(watcher.Start()).To(Succeed()) - cidrs := []*net.IPNet{} for _, cidr := range t.cidrs { cidrs = append(cidrs, parseCIDR(cidr)) } - obtaineddhcpConfigs, err := composeDHCPConfigs(watcher, t.controllerName, ktypes.NamespacedName{Namespace: t.namespace, Name: t.vmName}, cidrs) + obtaineddhcpConfigs, err := composeDHCPConfigs(t.controllerName, ktypes.NamespacedName{Namespace: t.namespace, Name: t.vmName}, cidrs, t.opts...) 
Expect(err).ToNot(HaveOccurred()) Expect(obtaineddhcpConfigs.V4).To(Equal(t.expectedDHCPConfigs.V4)) Expect(obtaineddhcpConfigs.V6).To(Equal(t.expectedDHCPConfigs.V6)) }, - Entry("IPv4 Single stack and k8s dns", dhcpTest{ - cidrs: []string{"192.168.25.0/24"}, - controllerName: "defaultController", - namespace: "namespace1", - vmName: "foo1", - dns: svc("kube-system", "kube-dns", []string{"192.167.23.44"}), - expectedDHCPConfigs: dhcpConfigs{V4: ComposeDHCPv4Options("192.168.25.0/24", "192.167.23.44", "defaultController", key("namespace1", "foo1"))}, + Entry("IPv4 Single stack and dns", dhcpTest{ + cidrs: []string{"192.168.25.0/24"}, + controllerName: "defaultController", + namespace: "namespace1", + vmName: "foo1", + opts: []DHCPConfigsOpt{ + WithIPv4DNSServer("192.167.23.44"), + WithIPv4Router("192.168.25.1"), + WithIPv4MTU(1500), + }, + expectedDHCPConfigs: dhcpConfigs{ + V4: &nbdb.DHCPOptions{ + Cidr: "192.168.25.0/24", + ExternalIDs: map[string]string{ + "k8s.ovn.org/owner-controller": "defaultController", + "k8s.ovn.org/owner-type": "VirtualMachine", + "k8s.ovn.org/name": "namespace1/foo1", + "k8s.ovn.org/cidr": "192.168.25.0/24", + "k8s.ovn.org/id": "defaultController:VirtualMachine:namespace1/foo1:192.168.25.0/24", + "k8s.ovn.org/zone": "local", + }, + Options: map[string]string{ + "lease_time": "3500", + "server_id": ARPProxyIPv4, + "server_mac": ARPProxyMAC, + "hostname": `"foo1"`, + "dns_server": "192.167.23.44", + "router": "192.168.25.1", + "mtu": "1500", + }, + }, + }, }), - Entry("IPv6 Single stack and k8s dns", dhcpTest{ - cidrs: []string{"2002:0:0:1234::/64"}, - controllerName: "defaultController", - namespace: "namespace1", - vmName: "foo1", - dns: svc("kube-system", "kube-dns", []string{"2001:1:2:3:4:5:6:7"}), - expectedDHCPConfigs: dhcpConfigs{V6: ComposeDHCPv6Options("2002:0:0:1234::/64", "2001:1:2:3:4:5:6:7", "defaultController", key("namespace1", "foo1"))}, + Entry("IPv6 Single stack and dns", dhcpTest{ + cidrs: []string{"2002:0:0:1234::/64"}, + controllerName: "defaultController", + namespace: "namespace1", + vmName: "foo1", + opts: []DHCPConfigsOpt{WithIPv6DNSServer("2001:1:2:3:4:5:6:7")}, + expectedDHCPConfigs: dhcpConfigs{ + V6: &nbdb.DHCPOptions{ + Cidr: "2002:0:0:1234::/64", + ExternalIDs: map[string]string{ + "k8s.ovn.org/owner-controller": "defaultController", + "k8s.ovn.org/owner-type": "VirtualMachine", + "k8s.ovn.org/name": "namespace1/foo1", + "k8s.ovn.org/cidr": "2002.0.0.1234../64", + "k8s.ovn.org/id": "defaultController:VirtualMachine:namespace1/foo1:2002.0.0.1234../64", + "k8s.ovn.org/zone": "local", + }, + Options: map[string]string{ + "server_id": "0a:58:6d:6d:c1:50", + "fqdn": `"foo1"`, + "dns_server": "2001:1:2:3:4:5:6:7", + }, + }, + }, }), - Entry("Dual stack and k8s dns", dhcpTest{ + Entry("Dual stack and dns", dhcpTest{ cidrs: []string{"192.168.25.0/24", "2002:0:0:1234::/64"}, controllerName: "defaultController", namespace: "namespace1", vmName: "foo1", - dns: svc("kube-system", "kube-dns", []string{"192.167.23.44", "2001:1:2:3:4:5:6:7"}), + opts: []DHCPConfigsOpt{ + WithIPv4DNSServer("192.167.23.44"), + WithIPv6DNSServer("2001:1:2:3:4:5:6:7"), + }, expectedDHCPConfigs: dhcpConfigs{ - V4: ComposeDHCPv4Options("192.168.25.0/24", "192.167.23.44", "defaultController", key("namespace1", "foo1")), - V6: ComposeDHCPv6Options("2002:0:0:1234::/64", "2001:1:2:3:4:5:6:7", "defaultController", key("namespace1", "foo1")), + V4: &nbdb.DHCPOptions{ + Cidr: "192.168.25.0/24", + ExternalIDs: map[string]string{ + "k8s.ovn.org/owner-controller": 
"defaultController", + "k8s.ovn.org/owner-type": "VirtualMachine", + "k8s.ovn.org/name": "namespace1/foo1", + "k8s.ovn.org/cidr": "192.168.25.0/24", + "k8s.ovn.org/id": "defaultController:VirtualMachine:namespace1/foo1:192.168.25.0/24", + "k8s.ovn.org/zone": "local", + }, + Options: map[string]string{ + "lease_time": "3500", + "server_id": ARPProxyIPv4, + "server_mac": ARPProxyMAC, + "hostname": `"foo1"`, + "dns_server": "192.167.23.44", + }, + }, + V6: &nbdb.DHCPOptions{ + Cidr: "2002:0:0:1234::/64", + ExternalIDs: map[string]string{ + "k8s.ovn.org/owner-controller": "defaultController", + "k8s.ovn.org/owner-type": "VirtualMachine", + "k8s.ovn.org/name": "namespace1/foo1", + "k8s.ovn.org/cidr": "2002.0.0.1234../64", + "k8s.ovn.org/id": "defaultController:VirtualMachine:namespace1/foo1:2002.0.0.1234../64", + "k8s.ovn.org/zone": "local", + }, + Options: map[string]string{ + "server_id": "0a:58:6d:6d:c1:50", + "fqdn": `"foo1"`, + "dns_server": "2001:1:2:3:4:5:6:7", + }, + }, }, }), - Entry("Dual stack and k8s dns with ipv4 only", dhcpTest{ + + Entry("Dual stack with single IPv4 dns server", dhcpTest{ cidrs: []string{"192.168.25.0/24", "2002:0:0:1234::/64"}, controllerName: "defaultController", namespace: "namespace1", vmName: "foo1", - dns: svc("kube-system", "kube-dns", []string{"192.167.23.44", ""}), + opts: []DHCPConfigsOpt{ + WithIPv4DNSServer("192.167.23.44"), + WithIPv6DNSServer(""), + }, expectedDHCPConfigs: dhcpConfigs{ - V4: ComposeDHCPv4Options("192.168.25.0/24", "192.167.23.44", "defaultController", key("namespace1", "foo1")), - V6: ComposeDHCPv6Options("2002:0:0:1234::/64", "", "defaultController", key("namespace1", "foo1")), + V4: &nbdb.DHCPOptions{ + Cidr: "192.168.25.0/24", + ExternalIDs: map[string]string{ + "k8s.ovn.org/owner-controller": "defaultController", + "k8s.ovn.org/owner-type": "VirtualMachine", + "k8s.ovn.org/name": "namespace1/foo1", + "k8s.ovn.org/cidr": "192.168.25.0/24", + "k8s.ovn.org/id": "defaultController:VirtualMachine:namespace1/foo1:192.168.25.0/24", + "k8s.ovn.org/zone": "local", + }, + Options: map[string]string{ + "lease_time": "3500", + "server_id": ARPProxyIPv4, + "server_mac": ARPProxyMAC, + "hostname": `"foo1"`, + "dns_server": "192.167.23.44", + }, + }, + V6: &nbdb.DHCPOptions{ + Cidr: "2002:0:0:1234::/64", + ExternalIDs: map[string]string{ + "k8s.ovn.org/owner-controller": "defaultController", + "k8s.ovn.org/owner-type": "VirtualMachine", + "k8s.ovn.org/name": "namespace1/foo1", + "k8s.ovn.org/cidr": "2002.0.0.1234../64", + "k8s.ovn.org/id": "defaultController:VirtualMachine:namespace1/foo1:2002.0.0.1234../64", + "k8s.ovn.org/zone": "local", + }, + Options: map[string]string{ + "server_id": "0a:58:6d:6d:c1:50", + "fqdn": `"foo1"`, + }, + }, }, }), ) DescribeTable("composing dhcp options should fail", func(t dhcpTest) { - svcs := []corev1.Service{} - if t.dns != nil { - svcs = append(svcs, *t.dns) - } - fakeClient := &util.OVNMasterClientset{ - KubeClient: fake.NewSimpleClientset(&corev1.ServiceList{ - Items: svcs, - }), - } - watcher, err := factory.NewMasterWatchFactory(fakeClient) - Expect(err).NotTo(HaveOccurred()) - Expect(watcher.Start()).To(Succeed()) - cidrs := []*net.IPNet{} for _, cidr := range t.cidrs { cidrs = append(cidrs, parseCIDR(cidr)) } - _, err = composeDHCPConfigs(watcher, t.controllerName, key(t.namespace, t.vmName), cidrs) + _, err := composeDHCPConfigs(t.controllerName, key(t.namespace, t.vmName), cidrs) Expect(err).To(MatchError(t.expectedError)) }, Entry("No cidr should fail", dhcpTest{ expectedError: "missing podIPs to 
compose dhcp options", }), - Entry("No dns should fail", dhcpTest{ - vmName: "vm1", - cidrs: []string{"192.168.3.0/24"}, - expectedError: `failed retrieving dns service cluster ip: service "kube-dns" not found`, - }), Entry("No hostname should fail", dhcpTest{ vmName: "", cidrs: []string{"192.168.25.0/24"}, - dns: svc("kube-system", "kube-dns", []string{"192.167.23.44"}), expectedError: "missing vmName to compose dhcp options", }), ) diff --git a/go-controller/pkg/kubevirt/external_ids.go b/go-controller/pkg/kubevirt/external_ids.go index 18dacd6d2c..328d243d7a 100644 --- a/go-controller/pkg/kubevirt/external_ids.go +++ b/go-controller/pkg/kubevirt/external_ids.go @@ -33,6 +33,11 @@ func externalIDsContainsVM(externalIDs map[string]string, vm *ktypes.NamespacedN if vm == nil { return false } + // FIXME: VM IDs have no DB IDs and therefore may clash with other LRPs that do contain DB IDs. They will always have ObjectNameKey + // set, therefore we now depend on the following key to be present. Remove this when DB IDs are implemented. + if _, ok := externalIDs[OvnZoneExternalIDKey]; !ok { + return false + } externalIDsVM := extractVMFromExternalIDs(externalIDs) if externalIDsVM == nil { return false @@ -45,6 +50,11 @@ func externalIDsContainsVM(externalIDs map[string]string, vm *ktypes.NamespacedN // if the expected ovn zone corresponds with the one it created via the // OvnZoneExternalIDKey func ownsItAndIsOrphanOrWrongZone(externalIDs map[string]string, vms map[ktypes.NamespacedName]bool) bool { + // FIXME: VM IDs have no DB IDs and therefore may clash with other LRPs that do contain DB IDs. They will always have ObjectNameKey + // set, therefore we now depend on the following key to be present. Remove this when DB IDs are implemented. + if _, ok := externalIDs[OvnZoneExternalIDKey]; !ok { + return false + } vm := extractVMFromExternalIDs(externalIDs) if vm == nil { return false // Not related to kubevirt diff --git a/go-controller/pkg/kubevirt/kubevirt_suite_test.go b/go-controller/pkg/kubevirt/kubevirt_suite_test.go index bb7fa22a9d..cce47b5bc1 100644 --- a/go-controller/pkg/kubevirt/kubevirt_suite_test.go +++ b/go-controller/pkg/kubevirt/kubevirt_suite_test.go @@ -3,7 +3,7 @@ package kubevirt import ( "testing" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/kubevirt/pod.go b/go-controller/pkg/kubevirt/pod.go index 8a28db7c8d..1f3a3139e2 100644 --- a/go-controller/pkg/kubevirt/pod.go +++ b/go-controller/pkg/kubevirt/pod.go @@ -3,6 +3,8 @@ package kubevirt import ( "fmt" "net" + "sort" + "strings" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -12,6 +14,7 @@ import ( kubevirtv1 "kubevirt.io/api/core/v1" libovsdbclient "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" @@ -199,7 +202,7 @@ func nodeContainsPodSubnet(watchFactory *factory.WatchFactory, nodeName string, return false, nil } -// ExtractVMNameFromPod retunes namespace and name of vm backed up but the pod +// ExtractVMNameFromPod returns the namespace and name of the VM backed by the pod // for regular pods return nil func ExtractVMNameFromPod(pod *corev1.Pod) *ktypes.NamespacedName { vmName, ok := pod.Labels[kubevirtv1.VirtualMachineNameLabel] @@ -343,3 +346,136 @@ func ZoneContainsPodSubnetOrUntracked(watchFactory *factory.WatchFactory, lsMana // to a
node return hostSubnets, !util.IsContainedInAnyCIDR(annotation.IPs[0], hostSubnets...), nil } + +// IsPodOwnedByVirtualMachine returns true if the pod is owned by a +// kubevirt virtual machine, false otherwise. +func IsPodOwnedByVirtualMachine(pod *corev1.Pod) bool { + return ExtractVMNameFromPod(pod) != nil +} + +// IsPodAllowedForMigration determines whether a given pod is eligible for live migration +func IsPodAllowedForMigration(pod *corev1.Pod, netInfo util.NetInfo) bool { + return IsPodOwnedByVirtualMachine(pod) && netInfo.TopologyType() == ovntypes.Layer2Topology +} + +func isTargetPodReady(targetPod *corev1.Pod) bool { + if targetPod == nil { + return false + } + + // This annotation only appears in live migration scenarios, and it signals + // that target VM pod is ready to receive traffic, so we can route + // traffic to it. + targetReadyTimestamp := targetPod.Annotations[kubevirtv1.MigrationTargetReadyTimestamp] + + // VM is ready to receive traffic + return targetReadyTimestamp != "" +} + +func filterNotComplete(vmPods []*corev1.Pod) []*corev1.Pod { + var notCompletePods []*corev1.Pod + for _, vmPod := range vmPods { + if !util.PodCompleted(vmPod) { + notCompletePods = append(notCompletePods, vmPod) + } + } + + return notCompletePods +} + +func tooManyPodsError(livingPods []*corev1.Pod) error { + var podNames = make([]string, len(livingPods)) + for i := range livingPods { + podNames[i] = livingPods[i].Namespace + "/" + livingPods[i].Name + } + return fmt.Errorf("unexpected live migration state at pods: %s", strings.Join(podNames, ",")) +} + +// LiveMigrationState represents the various states of a live migration process. +type LiveMigrationState string + +const ( + // LiveMigrationInProgress indicates that a live migration is currently ongoing. + LiveMigrationInProgress LiveMigrationState = "InProgress" + + // LiveMigrationTargetDomainReady indicates that the target domain is ready to take over. + LiveMigrationTargetDomainReady LiveMigrationState = "TargetDomainReady" + + // LiveMigrationFailed indicates that the live migration process has failed. + LiveMigrationFailed LiveMigrationState = "Failed" +) + +// LiveMigrationStatus provides details about the current status of a live migration. +// It includes information about the source and target pods as well as the migration state. +type LiveMigrationStatus struct { + SourcePod *corev1.Pod // SourcePod is the original pod. + TargetPod *corev1.Pod // TargetPod is the destination pod. + State LiveMigrationState // State is the current state of the live migration. +} + +// IsTargetDomainReady returns true if the target domain in the live migration process is ready. +func (lm LiveMigrationStatus) IsTargetDomainReady() bool { + return lm.State == LiveMigrationTargetDomainReady +} + +// DiscoverLiveMigrationStatus determines the status of a live migration for a given pod. +// It analyzes the state of pods associated with a VirtualMachine (VM) to identify whether +// a live migration is in progress, the target domain is ready, or the migration has failed. +// +// Note: The function assumes that the pod is part of a VirtualMachine resource managed +// by KubeVirt.
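Before the implementation that follows, here is a minimal sketch of how a caller might consume the three migration states. The reconcile wrapper is hypothetical and only illustrates the intended contract of DiscoverLiveMigrationStatus; it is not part of this patch.

```go
package example

import (
	corev1 "k8s.io/api/core/v1"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory"
	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kubevirt"
)

// reconcileVMPod is a hypothetical caller showing the three states a
// consumer of DiscoverLiveMigrationStatus has to handle.
func reconcileVMPod(wf *factory.WatchFactory, pod *corev1.Pod) error {
	status, err := kubevirt.DiscoverLiveMigrationStatus(wf, pod)
	if err != nil {
		return err
	}
	if status == nil {
		// Not a KubeVirt VM pod, or no migration in flight.
		return nil
	}
	switch status.State {
	case kubevirt.LiveMigrationTargetDomainReady:
		// Safe to reroute traffic to status.TargetPod.
	case kubevirt.LiveMigrationInProgress:
		// Keep routing traffic to status.SourcePod until the target is ready.
	case kubevirt.LiveMigrationFailed:
		// Keep status.SourcePod as-is and clean up any target-side state.
	}
	return nil
}
```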
+func DiscoverLiveMigrationStatus(client *factory.WatchFactory, pod *corev1.Pod) (*LiveMigrationStatus, error) { + vmKey := ExtractVMNameFromPod(pod) + if vmKey == nil { + return nil, nil + } + + vmPods, err := client.GetPodsBySelector(pod.Namespace, metav1.LabelSelector{MatchLabels: map[string]string{kubevirtv1.VirtualMachineNameLabel: vmKey.Name}}) + if err != nil { + return nil, err + } + + // no migration + if len(vmPods) < 2 { + return nil, nil + } + + // Sort vmPods by creation time + sort.Slice(vmPods, func(i, j int) bool { + return vmPods[j].CreationTimestamp.After(vmPods[i].CreationTimestamp.Time) + }) + + targetPod := vmPods[len(vmPods)-1] + livingPods := filterNotComplete(vmPods) + if util.PodCompleted(targetPod) { + // if target pod failed, then there should be only one living source pod. + if len(livingPods) != 1 { + return nil, fmt.Errorf("unexpected live migration state: should have a single living pod") + } + return &LiveMigrationStatus{ + SourcePod: livingPods[0], + TargetPod: targetPod, + State: LiveMigrationFailed, + }, nil + } + + // no active migration + if len(livingPods) < 2 { + return nil, nil + } + + if len(livingPods) > 2 { + return nil, tooManyPodsError(livingPods) + } + + status := LiveMigrationStatus{ + SourcePod: livingPods[0], + TargetPod: livingPods[1], + State: LiveMigrationInProgress, + } + + if isTargetPodReady(status.TargetPod) { + status.State = LiveMigrationTargetDomainReady + } + return &status, nil +} diff --git a/go-controller/pkg/kubevirt/pod_test.go b/go-controller/pkg/kubevirt/pod_test.go new file mode 100644 index 0000000000..d34b5776a5 --- /dev/null +++ b/go-controller/pkg/kubevirt/pod_test.go @@ -0,0 +1,212 @@ +package kubevirt + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/rand" + kubevirtv1 "kubevirt.io/api/core/v1" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" +) + +const vmName = "test-vm" + +var _ = Describe("Kubevirt Pod", func() { + const ( + t0 = time.Duration(0) + t1 = time.Duration(1) + t2 = time.Duration(2) + t3 = time.Duration(3) + t4 = time.Duration(4) + ) + runningKvSourcePod := runningKubevirtPod(t0) + successfullyMigratedKvSourcePod := completedKubevirtPod(t1) + + failedMigrationKvTargetPod := failedKubevirtPod(t2) + successfulMigrationKvTargetPod := runningKubevirtPod(t3) + anotherFailedMigrationKvTargetPod := failedKubevirtPod(t4) + duringMigrationKvTargetPod := runningKubevirtPod(t4) + yetAnotherDuringMigrationKvTargetPod := runningKubevirtPod(t4) + readyMigrationKvTargetPod := domainReadyKubevirtPod(t4) + + type testParams struct { + pods []corev1.Pod + expectedError error + expectedMigrationStatus *LiveMigrationStatus + } + DescribeTable("DiscoverLiveMigrationStatus", func(params testParams) { + Expect(config.PrepareTestConfig()).To(Succeed()) + config.OVNKubernetesFeature.EnableNetworkSegmentation = true + config.OVNKubernetesFeature.EnableMultiNetwork = true + config.OVNKubernetesFeature.EnableInterconnect = true + + fakeClient := util.GetOVNClientset().GetOVNKubeControllerClientset() + wf, err := factory.NewOVNKubeControllerWatchFactory(fakeClient) + Expect(err).ToNot(HaveOccurred()) + + for _, pod := range params.pods { + _, err := fakeClient.KubeClient.CoreV1().Pods(pod.Namespace).Create(context.Background(), &pod, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) + } + + Expect(wf.Start()).To(Succeed()) + defer wf.Shutdown() + + currentPod := params.pods[0] + migrationStatus, err := DiscoverLiveMigrationStatus(wf, ¤tPod) + if params.expectedError == nil { + Expect(err).To(BeNil()) + } else { + Expect(err).To(MatchError(ContainSubstring(params.expectedError.Error()))) + } + + if params.expectedMigrationStatus == nil { + Expect(migrationStatus).To(BeNil()) + } else { + Expect(migrationStatus.State).To(Equal(params.expectedMigrationStatus.State)) + Expect(migrationStatus.SourcePod.Name).To(Equal(params.expectedMigrationStatus.SourcePod.Name)) + Expect(migrationStatus.TargetPod.Name).To(Equal(params.expectedMigrationStatus.TargetPod.Name)) + } + }, + Entry("returns nil when pod is not kubevirt related", + testParams{ + pods: []corev1.Pod{nonKubevirtPod()}, + }, + ), + Entry("returns nil when migration was not performed", + testParams{ + pods: []corev1.Pod{runningKvSourcePod}, + }, + ), + Entry("returns nil when there is no active migration", + testParams{ + pods: []corev1.Pod{successfullyMigratedKvSourcePod, successfulMigrationKvTargetPod}, + }, + ), + Entry("returns nil when there is no active migration (multiple migrations)", + testParams{ + pods: []corev1.Pod{successfullyMigratedKvSourcePod, failedMigrationKvTargetPod, successfulMigrationKvTargetPod}, + }, + ), + Entry("returns Migration in progress status when 2 pods are running, target pod is not yet ready", + testParams{ + pods: []corev1.Pod{runningKvSourcePod, duringMigrationKvTargetPod}, + expectedMigrationStatus: &LiveMigrationStatus{ + SourcePod: &runningKvSourcePod, + TargetPod: &duringMigrationKvTargetPod, + State: LiveMigrationInProgress, + }, + }, + ), + Entry("returns Migration Failed 
status when latest target pod failed", + testParams{ + pods: []corev1.Pod{runningKvSourcePod, failedMigrationKvTargetPod}, + expectedMigrationStatus: &LiveMigrationStatus{ + SourcePod: &runningKvSourcePod, + TargetPod: &failedMigrationKvTargetPod, + State: LiveMigrationFailed, + }, + }, + ), + Entry("returns Migration Failed status when latest target pod failed (multiple migrations)", + testParams{ + pods: []corev1.Pod{runningKvSourcePod, failedMigrationKvTargetPod, anotherFailedMigrationKvTargetPod}, + expectedMigrationStatus: &LiveMigrationStatus{ + SourcePod: &runningKvSourcePod, + TargetPod: &anotherFailedMigrationKvTargetPod, + State: LiveMigrationFailed, + }, + }, + ), + Entry("returns Migration Ready status when latest target pod is ready", + testParams{ + pods: []corev1.Pod{runningKvSourcePod, readyMigrationKvTargetPod}, + expectedMigrationStatus: &LiveMigrationStatus{ + SourcePod: &runningKvSourcePod, + TargetPod: &readyMigrationKvTargetPod, + State: LiveMigrationTargetDomainReady, + }, + }, + ), + Entry("returns Migration Ready status when latest target pod is ready (multiple migrations)", + testParams{ + pods: []corev1.Pod{runningKvSourcePod, failedMigrationKvTargetPod, readyMigrationKvTargetPod}, + expectedMigrationStatus: &LiveMigrationStatus{ + SourcePod: &runningKvSourcePod, + TargetPod: &readyMigrationKvTargetPod, + State: LiveMigrationTargetDomainReady, + }, + }, + ), + Entry("returns err when kubevirt VM has several living pods and target pod failed", + testParams{ + pods: []corev1.Pod{runningKvSourcePod, successfulMigrationKvTargetPod, anotherFailedMigrationKvTargetPod}, + expectedError: fmt.Errorf("unexpected live migration state: should have a single living pod"), + }, + ), + Entry("returns err when kubevirt VM has several living pods", + testParams{ + pods: []corev1.Pod{runningKvSourcePod, duringMigrationKvTargetPod, yetAnotherDuringMigrationKvTargetPod}, + expectedError: fmt.Errorf("unexpected live migration state at pods"), + }, + ), + ) +}) + +func completedKubevirtPod(creationOffset time.Duration) corev1.Pod { + return newKubevirtPod(corev1.PodSucceeded, nil, creationOffset) +} + +func failedKubevirtPod(creationOffset time.Duration) corev1.Pod { + return newKubevirtPod(corev1.PodFailed, nil, creationOffset) +} + +func runningKubevirtPod(creationOffset time.Duration) corev1.Pod { + return newKubevirtPod(corev1.PodRunning, nil, creationOffset) +} + +func domainReadyKubevirtPod(creationOffset time.Duration) corev1.Pod { + return newKubevirtPod(corev1.PodRunning, map[string]string{kubevirtv1.MigrationTargetReadyTimestamp: "some-timestamp"}, creationOffset) +} + +func nonKubevirtPod() corev1.Pod { + return corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "some-pod", + Namespace: corev1.NamespaceDefault, + }, + Spec: corev1.PodSpec{}, + } +} +func newKubevirtPod(phase corev1.PodPhase, annotations map[string]string, creationOffset time.Duration) corev1.Pod { + return corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "virt-launcher-" + vmName + rand.String(5), + Namespace: corev1.NamespaceDefault, + Annotations: annotations, + Labels: map[string]string{kubevirtv1.VirtualMachineNameLabel: vmName}, + CreationTimestamp: metav1.Time{Time: time.Now().Add(creationOffset)}, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + Phase: phase, + }, + } +} diff --git a/go-controller/pkg/kubevirt/router.go 
b/go-controller/pkg/kubevirt/router.go index 802a60b1cb..8b68d24c55 100644 --- a/go-controller/pkg/kubevirt/router.go +++ b/go-controller/pkg/kubevirt/router.go @@ -65,7 +65,8 @@ func DeleteRoutingForMigratedPod(nbClient libovsdbclient.Client, pod *corev1.Pod // // Both: // - static route with VM ip as dst-ip prefix and output port the LRP pointing to the VM's node switch -func EnsureLocalZonePodAddressesToNodeRoute(watchFactory *factory.WatchFactory, nbClient libovsdbclient.Client, lsManager *logicalswitchmanager.LogicalSwitchManager, pod *corev1.Pod, nadName string) error { +func EnsureLocalZonePodAddressesToNodeRoute(watchFactory *factory.WatchFactory, nbClient libovsdbclient.Client, + lsManager *logicalswitchmanager.LogicalSwitchManager, pod *corev1.Pod, nadName string, clusterSubnets []config.CIDRNetworkEntry) error { vmReady, err := virtualMachineReady(watchFactory, pod) if err != nil { return err @@ -94,7 +95,7 @@ func EnsureLocalZonePodAddressesToNodeRoute(watchFactory *factory.WatchFactory, if config.OVNKubernetesFeature.EnableInterconnect { // NOTE: EIP & ESVC use same route and if this is already present thanks to those features, // this will be a no-op - if err := libovsdbutil.CreateDefaultRouteToExternal(nbClient, types.OVNClusterRouter, types.GWRouterPrefix+pod.Spec.NodeName); err != nil { + if err := libovsdbutil.CreateDefaultRouteToExternal(nbClient, types.OVNClusterRouter, types.GWRouterPrefix+pod.Spec.NodeName, clusterSubnets); err != nil { return err } } @@ -122,7 +123,7 @@ func EnsureLocalZonePodAddressesToNodeRoute(watchFactory *factory.WatchFactory, Match: match, Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{nodeGRAddress.IP.String()}, - Priority: types.EgressLiveMigrationReroutePiority, + Priority: types.EgressLiveMigrationReroutePriority, ExternalIDs: map[string]string{ OvnZoneExternalIDKey: OvnLocalZone, VirtualMachineExternalIDsKey: pod.Labels[kubevirtv1.VirtualMachineNameLabel], diff --git a/go-controller/pkg/libovsdb/libovsdb.go b/go-controller/pkg/libovsdb/libovsdb.go index 98f8a9c297..c7aaaa4882 100644 --- a/go-controller/pkg/libovsdb/libovsdb.go +++ b/go-controller/pkg/libovsdb/libovsdb.go @@ -23,6 +23,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/vswitchd" "github.com/prometheus/client_golang/prometheus" "gopkg.in/fsnotify/fsnotify.v1" "gopkg.in/natefinch/lumberjack.v2" @@ -226,6 +227,47 @@ func NewNBClientWithConfig(cfg config.OvnAuthConfig, promRegistry prometheus.Reg return c, nil } +// NewOVSClient creates a new openvswitch Database client +func NewOVSClient(stopCh <-chan struct{}) (client.Client, error) { + cfg := &config.OvnAuthConfig{ + Scheme: config.OvnDBSchemeUnix, + Address: "unix:/var/run/openvswitch/db.sock", + } + + return NewOVSClientWithConfig(*cfg, stopCh) +} + +func NewOVSClientWithConfig(cfg config.OvnAuthConfig, stopCh <-chan struct{}) (client.Client, error) { + dbModel, err := vswitchd.FullDatabaseModel() + if err != nil { + return nil, err + } + c, err := newClient(cfg, dbModel, stopCh) + if err != nil { + return nil, err + } + ctx, cancel := context.WithTimeout(context.Background(), types.OVSDBTimeout) + go func() { + <-stopCh + cancel() + }() + + _, err = c.Monitor(ctx, + c.NewMonitor( + client.WithTable(&vswitchd.OpenvSwitch{}), + client.WithTable(&vswitchd.Bridge{}), + client.WithTable(&vswitchd.Port{}), + 
client.WithTable(&vswitchd.Interface{}), + ), + ) + if err != nil { + c.Close() + return nil, err + } + + return c, nil +} + func createTLSConfig(certFile, privKeyFile, caCertFile, serverName string) (*tls.Config, error) { cert, err := tls.LoadX509KeyPair(certFile, privKeyFile) if err != nil { diff --git a/go-controller/pkg/libovsdb/ops/acl.go b/go-controller/pkg/libovsdb/ops/acl.go index aacc00706d..3d6e815145 100644 --- a/go-controller/pkg/libovsdb/ops/acl.go +++ b/go-controller/pkg/libovsdb/ops/acl.go @@ -22,7 +22,7 @@ func GetACLName(acl *nbdb.ACL) string { func getACLMutableFields(acl *nbdb.ACL) []interface{} { return []interface{}{&acl.Action, &acl.Direction, &acl.ExternalIDs, &acl.Log, &acl.Match, &acl.Meter, - &acl.Name, &acl.Options, &acl.Priority, &acl.Severity, &acl.Tier} + &acl.Name, &acl.Options, &acl.Priority, &acl.Severity, &acl.Tier, &acl.SampleNew, &acl.SampleEst} } type aclPredicate func(*nbdb.ACL) bool @@ -107,7 +107,7 @@ func SetACLLogging(acl *nbdb.ACL, severity nbdb.ACLSeverity, log bool) { // CreateOrUpdateACLsOps creates or updates the provided ACLs returning the // corresponding ops -func CreateOrUpdateACLsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) { +func CreateOrUpdateACLsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, samplingConfig *SamplingConfig, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) { opModels := make([]operationModel, 0, len(acls)) for i := range acls { // can't use i in the predicate, for loop replaces it in-memory @@ -117,6 +117,7 @@ func CreateOrUpdateACLsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operat // node ACLs won't have names set *acl.Name = fmt.Sprintf("%.63s", *acl.Name) } + opModels = addSample(samplingConfig, opModels, acl) opModel := operationModel{ Model: acl, OnModelUpdates: getACLMutableFields(acl), @@ -149,8 +150,8 @@ func UpdateACLsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, acl } // CreateOrUpdateACLs creates or updates the provided ACLs -func CreateOrUpdateACLs(nbClient libovsdbclient.Client, acls ...*nbdb.ACL) error { - ops, err := CreateOrUpdateACLsOps(nbClient, nil, acls...) +func CreateOrUpdateACLs(nbClient libovsdbclient.Client, samplingConfig *SamplingConfig, acls ...*nbdb.ACL) error { + ops, err := CreateOrUpdateACLsOps(nbClient, nil, samplingConfig, acls...) 
if err != nil { return err } diff --git a/go-controller/pkg/libovsdb/ops/acl_test.go b/go-controller/pkg/libovsdb/ops/acl_test.go index 2b2ad7a6d1..7f52bb01e7 100644 --- a/go-controller/pkg/libovsdb/ops/acl_test.go +++ b/go-controller/pkg/libovsdb/ops/acl_test.go @@ -146,7 +146,7 @@ func TestCreateOrUpdateACL(t *testing.T) { updatedACL := tt.finalACL.DeepCopy() updatedACL.UUID = initialACLs[0].UUID - err = CreateOrUpdateACLs(nbClient, updatedACL) + err = CreateOrUpdateACLs(nbClient, nil, updatedACL) if err != nil { t.Fatalf("test: \"%s\" failed to set up test harness: %v", tt.desc, err) } diff --git a/go-controller/pkg/libovsdb/ops/db_object_ids.go b/go-controller/pkg/libovsdb/ops/db_object_ids.go index 2275091f8c..b242d7d3ea 100644 --- a/go-controller/pkg/libovsdb/ops/db_object_ids.go +++ b/go-controller/pkg/libovsdb/ops/db_object_ids.go @@ -8,7 +8,7 @@ import ( ) type dbObjType int -type ownerType string +type ownerType = string type ExternalIDKey string func (key ExternalIDKey) String() string { @@ -225,7 +225,7 @@ func (objectIDs *DbObjectIDs) GetExternalIDs() map[string]string { func (objectIDs *DbObjectIDs) getExternalIDs(allowEmptyKeys bool) map[string]string { externalIDs := map[string]string{ OwnerControllerKey.String(): objectIDs.ownerControllerName, - OwnerTypeKey.String(): string(objectIDs.idsType.ownerObjectType), + OwnerTypeKey.String(): objectIDs.idsType.ownerObjectType, } for key, value := range objectIDs.objectIDs { externalIDs[key.String()] = value @@ -244,7 +244,7 @@ func (objectIDs *DbObjectIDs) getExternalIDs(allowEmptyKeys bool) map[string]str // in the DbObjectIDs.objectIDs, they will be replaced with empty strings. // String returns the representation of all the information set in DbObjectIDs. func (objectIDs *DbObjectIDs) String() string { - id := objectIDs.ownerControllerName + ":" + string(objectIDs.idsType.ownerObjectType) + id := objectIDs.ownerControllerName + ":" + objectIDs.idsType.ownerObjectType for _, key := range objectIDs.idsType.GetExternalIDKeys() { id += ":" + objectIDs.objectIDs[key] } @@ -258,7 +258,7 @@ func (objectIDs *DbObjectIDs) GetIDsType() *ObjectIDsType { // getUniqueID returns primary id that is build based on objectIDs values. // If at least one required key is missing, an error will be returned. func (objectIDs *DbObjectIDs) getUniqueID() (string, error) { - id := objectIDs.ownerControllerName + ":" + string(objectIDs.idsType.ownerObjectType) + id := objectIDs.ownerControllerName + ":" + objectIDs.idsType.ownerObjectType for _, key := range objectIDs.idsType.GetExternalIDKeys() { value, ok := objectIDs.objectIDs[key] if !ok { @@ -273,9 +273,9 @@ func (objectIDs *DbObjectIDs) getUniqueID() (string, error) { // on OwnerControllerKey key, and verifies OwnerControllerKey value matches given objectIDsType. // All the other ids from objectIDsType will be set to DbObjectIDs.objectIDs. 
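The alias change above removes the string conversions around ownerType; to make the external-ID round trip concrete, here is a small sketch that builds IDs and parses them back with the function defined just below. It assumes the package's NewDbObjectIDs constructor (IDs type, owner controller name, key/value map), so treat the exact call shape as illustrative:

```go
package example

import (
	"fmt"

	libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops"
)

func roundTrip() error {
	// Build the external IDs for an EgressIP address set (note the
	// NetworkKey added to AddressSetEgressIP in this patch) ...
	ids := libovsdbops.NewDbObjectIDs(libovsdbops.AddressSetEgressIP, "default-controller",
		map[libovsdbops.ExternalIDKey]string{
			libovsdbops.ObjectNameKey: "egressip-node1",
			libovsdbops.IPFamilyKey:   "ip4",
			libovsdbops.NetworkKey:    "default",
		})
	externalIDs := ids.GetExternalIDs()
	fmt.Println(externalIDs[libovsdbops.OwnerTypeKey.String()]) // "EgressIP"

	// ... and parse them back, which verifies the owner type and controller.
	parsed, err := libovsdbops.NewDbObjectIDsFromExternalIDs(libovsdbops.AddressSetEgressIP, externalIDs)
	if err != nil {
		return err
	}
	fmt.Println(parsed.String())
	return nil
}
```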
func NewDbObjectIDsFromExternalIDs(objectIDsType *ObjectIDsType, externalIDs map[string]string) (*DbObjectIDs, error) { - if externalIDs[OwnerTypeKey.String()] != string(objectIDsType.ownerObjectType) { + if externalIDs[OwnerTypeKey.String()] != objectIDsType.ownerObjectType { return nil, fmt.Errorf("expected ExternalID %s to equal %s, got %s", - OwnerTypeKey, string(objectIDsType.ownerObjectType), externalIDs[OwnerTypeKey.String()]) + OwnerTypeKey, objectIDsType.ownerObjectType, externalIDs[OwnerTypeKey.String()]) } if externalIDs[OwnerControllerKey.String()] == "" { return nil, fmt.Errorf("required ExternalID %s is empty", OwnerControllerKey) diff --git a/go-controller/pkg/libovsdb/ops/db_object_types.go b/go-controller/pkg/libovsdb/ops/db_object_types.go index 9f0e8dfe87..bb2afeea11 100644 --- a/go-controller/pkg/libovsdb/ops/db_object_types.go +++ b/go-controller/pkg/libovsdb/ops/db_object_types.go @@ -9,6 +9,7 @@ const ( portGroup logicalRouterPolicy qos + nat ) const ( @@ -33,6 +34,7 @@ const ( NetpolNodeOwnerType ownerType = "NetpolNode" NetpolNamespaceOwnerType ownerType = "NetpolNamespace" VirtualMachineOwnerType ownerType = "VirtualMachine" + UDNEnabledServiceOwnerType ownerType = "UDNEnabledService" // NetworkPolicyPortIndexOwnerType is the old version of NetworkPolicyOwnerType, kept for sync only NetworkPolicyPortIndexOwnerType ownerType = "NetworkPolicyPortIndexOwnerType" // ClusterOwnerType means the object is cluster-scoped and doesn't belong to any k8s objects @@ -45,6 +47,7 @@ const ( PolicyDirectionKey ExternalIDKey = "direction" GressIdxKey ExternalIDKey = "gress-index" IPFamilyKey ExternalIDKey = "ip-family" + NetworkKey ExternalIDKey = "network" TypeKey ExternalIDKey = "type" IpKey ExternalIDKey = "ip" PortPolicyIndexKey ExternalIDKey = "port-policy-index" @@ -123,6 +126,7 @@ var AddressSetEgressIP = newObjectIDsType(addressSet, EgressIPOwnerType, []Exter // cluster-wide address set name ObjectNameKey, IPFamilyKey, + NetworkKey, }) var AddressSetEgressService = newObjectIDsType(addressSet, EgressServiceOwnerType, []ExternalIDKey{ @@ -131,6 +135,12 @@ var AddressSetEgressService = newObjectIDsType(addressSet, EgressServiceOwnerTyp IPFamilyKey, }) +var AddressSetUDNEnabledService = newObjectIDsType(addressSet, UDNEnabledServiceOwnerType, []ExternalIDKey{ + // cluster-wide address set name + ObjectNameKey, + IPFamilyKey, +}) + var ACLAdminNetworkPolicy = newObjectIDsType(acl, AdminNetworkPolicyOwnerType, []ExternalIDKey{ // anp name ObjectNameKey, @@ -308,6 +318,14 @@ var LogicalRouterPolicyEgressIP = newObjectIDsType(logicalRouterPolicy, EgressIP ObjectNameKey, // the IP Family for this policy, ip4 or ip6 or ip(dualstack) IPFamilyKey, + NetworkKey, +}) + +var NATEgressIP = newObjectIDsType(nat, EgressIPOwnerType, []ExternalIDKey{ + // for the NAT policy, it should be the "EIPName_Namespace/podName" + ObjectNameKey, + // the IP Family for this policy, ip4 or ip6 or ip(dualstack) + IPFamilyKey, }) var QoSEgressQoS = newObjectIDsType(qos, EgressQoSOwnerType, []ExternalIDKey{ diff --git a/go-controller/pkg/libovsdb/ops/lbgroup.go b/go-controller/pkg/libovsdb/ops/lbgroup.go index f18577d25e..854c8f2b2d 100644 --- a/go-controller/pkg/libovsdb/ops/lbgroup.go +++ b/go-controller/pkg/libovsdb/ops/lbgroup.go @@ -4,15 +4,16 @@ import ( "context" libovsdbclient "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/ovsdb" libovsdb "github.com/ovn-org/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" ) -// CreateOrUpdateLoadBalancerGroup creates or updates the provided load balancer -// group -func CreateOrUpdateLoadBalancerGroup(nbClient libovsdbclient.Client, group *nbdb.LoadBalancerGroup) error { +// CreateOrUpdateLoadBalancerGroupOps returns the ops to create or update the +// provided load balancer group +func CreateOrUpdateLoadBalancerGroupOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, group *nbdb.LoadBalancerGroup) ([]ovsdb.Operation, error) { // lb group has no fields other than name, safe to update just with non-default values opModel := operationModel{ Model: group, @@ -22,8 +23,11 @@ func CreateOrUpdateLoadBalancerGroup(nbClient libovsdbclient.Client, group *nbdb } m := newModelClient(nbClient) - _, err := m.CreateOrUpdate(opModel) - return err + ops, err := m.CreateOrUpdateOps(ops, opModel) + if err != nil { + return nil, err + } + return ops, nil } // AddLoadBalancersToGroupOps adds the provided load balancers to the provided diff --git a/go-controller/pkg/libovsdb/ops/loadbalancer.go b/go-controller/pkg/libovsdb/ops/loadbalancer.go index 7d18240378..097d37b4ca 100644 --- a/go-controller/pkg/libovsdb/ops/loadbalancer.go +++ b/go-controller/pkg/libovsdb/ops/loadbalancer.go @@ -137,3 +137,15 @@ func ListLoadBalancers(nbClient libovsdbclient.Client) ([]*nbdb.LoadBalancer, er err := nbClient.List(ctx, &lbs) return lbs, err } + +type loadBalancerPredicate func(*nbdb.LoadBalancer) bool + +// FindLoadBalancersWithPredicate looks up loadbalancers from the cache +// based on a given predicate +func FindLoadBalancersWithPredicate(nbClient libovsdbclient.Client, p loadBalancerPredicate) ([]*nbdb.LoadBalancer, error) { + found := []*nbdb.LoadBalancer{} + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + err := nbClient.WhereCache(p).List(ctx, &found) + return found, err +} diff --git a/go-controller/pkg/libovsdb/ops/model.go b/go-controller/pkg/libovsdb/ops/model.go index b52f958133..76b525fa48 100644 --- a/go-controller/pkg/libovsdb/ops/model.go +++ b/go-controller/pkg/libovsdb/ops/model.go @@ -7,6 +7,7 @@ import ( "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/libovsdb/model" "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" @@ -50,6 +51,12 @@ func getUUID(model model.Model) string { return t.UUID case *nbdb.Meter: return t.UUID + case *nbdb.Sample: + return t.UUID + case *nbdb.SampleCollector: + return t.UUID + case *nbdb.SamplingApp: + return t.UUID case *nbdb.StaticMACBinding: return t.UUID case *sbdb.Chassis: @@ -113,6 +120,12 @@ func setUUID(model model.Model, uuid string) { t.UUID = uuid case *nbdb.Meter: t.UUID = uuid + case *nbdb.Sample: + t.UUID = uuid + case *nbdb.SampleCollector: + t.UUID = uuid + case *nbdb.SamplingApp: + t.UUID = uuid case *nbdb.StaticMACBinding: t.UUID = uuid case *sbdb.Chassis: @@ -229,6 +242,21 @@ func copyIndexes(model model.Model) model.Model { UUID: t.UUID, Name: t.Name, } + case *nbdb.Sample: + return &nbdb.Sample{ + UUID: t.UUID, + Metadata: t.Metadata, + } + case *nbdb.SampleCollector: + return &nbdb.SampleCollector{ + UUID: t.UUID, + ID: t.ID, + } + case *nbdb.SamplingApp: + return &nbdb.SamplingApp{ + UUID: t.UUID, + Type: t.Type, + } case *nbdb.StaticMACBinding: return &nbdb.StaticMACBinding{ UUID: t.UUID, @@ -327,6 +355,12 @@ func 
getListFromModel(model model.Model) interface{} { return &[]*nbdb.MeterBand{} case *nbdb.Meter: return &[]*nbdb.Meter{} + case *nbdb.Sample: + return &[]*nbdb.Sample{} + case *nbdb.SampleCollector: + return &[]*nbdb.SampleCollector{} + case *nbdb.SamplingApp: + return &[]*nbdb.SamplingApp{} case *nbdb.StaticMACBinding: return &[]*nbdb.StaticMACBinding{} case *sbdb.Chassis: @@ -437,7 +471,12 @@ func buildFailOnDuplicateOps(c client.Client, m model.Model) ([]ovsdb.Operation, Function: ovsdb.ConditionEqual, Value: t.Match, } - return c.WhereAll(t, condPriority, condMatch).Wait( + condExtID := model.Condition{ + Field: &t.ExternalIDs, + Function: ovsdb.ConditionIncludes, + Value: t.ExternalIDs, + } + return c.WhereAll(t, condPriority, condMatch, condExtID).Wait( ovsdb.WaitConditionNotEqual, &timeout, t, @@ -456,6 +495,64 @@ func buildFailOnDuplicateOps(c client.Client, m model.Model) ([]ovsdb.Operation, return c.WhereAny(m, cond).Wait(ovsdb.WaitConditionNotEqual, &timeout, m, field) } +// ModelUpdateField enumeration represents fields that can be updated on the supported models +type ModelUpdateField int + +const ( + LogicalSwitchPortAddresses ModelUpdateField = iota + LogicalSwitchPortType + LogicalSwitchPortTagRequest + LogicalSwitchPortOptions + LogicalSwitchPortPortSecurity + LogicalSwitchPortEnabled + + PortGroupACLs + PortGroupPorts + PortGroupExternalIDs +) + +// getFieldsToUpdate gets a model and a list of ModelUpdateField and returns a list of their related interface{} fields. +func getFieldsToUpdate(model model.Model, fieldNames []ModelUpdateField) []interface{} { + var fields []interface{} + switch t := model.(type) { + case *nbdb.LogicalSwitchPort: + for _, field := range fieldNames { + switch field { + case LogicalSwitchPortAddresses: + fields = append(fields, &t.Addresses) + case LogicalSwitchPortType: + fields = append(fields, &t.Type) + case LogicalSwitchPortTagRequest: + fields = append(fields, &t.TagRequest) + case LogicalSwitchPortOptions: + fields = append(fields, &t.Options) + case LogicalSwitchPortPortSecurity: + fields = append(fields, &t.PortSecurity) + case LogicalSwitchPortEnabled: + fields = append(fields, &t.Enabled) + default: + panic(fmt.Sprintf("getFieldsToUpdate: unknown or unsupported field %q for LogicalSwitchPort", field)) + } + } + case *nbdb.PortGroup: + for _, field := range fieldNames { + switch field { + case PortGroupACLs: + fields = append(fields, &t.ACLs) + case PortGroupPorts: + fields = append(fields, &t.Ports) + case PortGroupExternalIDs: + fields = append(fields, &t.ExternalIDs) + default: + panic(fmt.Sprintf("getFieldsToUpdate: unknown or unsupported field %q for PortGroup", field)) + } + } + default: + panic(fmt.Sprintf("getFieldsToUpdate: unknown model type %T", t)) + } + return fields +} + // getAllUpdatableFields returns a list of all of the columns/fields that can be updated for a model func getAllUpdatableFields(model model.Model) []interface{} { switch t := model.(type) { diff --git a/go-controller/pkg/libovsdb/ops/model_client_test.go b/go-controller/pkg/libovsdb/ops/model_client_test.go index b058951b1a..075c2b45bd 100644 --- a/go-controller/pkg/libovsdb/ops/model_client_test.go +++ b/go-controller/pkg/libovsdb/ops/model_client_test.go @@ -6,8 +6,6 @@ import ( "sort" "testing" - "github.com/onsi/ginkgo" - "github.com/onsi/gomega/types" "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/libovsdb/model" @@ -44,7 +42,6 @@ type OperationModelTestCase struct { } func runTestCase(t *testing.T, tCase OperationModelTestCase) error { - 
ginkgo.By(tCase.name) dbSetup := libovsdbtest.TestSetup{ NBData: tCase.initialDB, } diff --git a/go-controller/pkg/libovsdb/ops/ovs/bridge.go b/go-controller/pkg/libovsdb/ops/ovs/bridge.go new file mode 100644 index 0000000000..ebd5a40f6a --- /dev/null +++ b/go-controller/pkg/libovsdb/ops/ovs/bridge.go @@ -0,0 +1,17 @@ +package ovs + +import ( + "context" + libovsdbclient "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/vswitchd" +) + +// ListBridges looks up all ovs bridges from the cache +func ListBridges(ovsClient libovsdbclient.Client) ([]*vswitchd.Bridge, error) { + ctx, cancel := context.WithTimeout(context.Background(), types.OVSDBTimeout) + defer cancel() + searchedBridges := []*vswitchd.Bridge{} + err := ovsClient.List(ctx, &searchedBridges) + return searchedBridges, err +} diff --git a/go-controller/pkg/libovsdb/ops/ovs/interface.go b/go-controller/pkg/libovsdb/ops/ovs/interface.go new file mode 100644 index 0000000000..debac58b35 --- /dev/null +++ b/go-controller/pkg/libovsdb/ops/ovs/interface.go @@ -0,0 +1,30 @@ +package ovs + +import ( + "context" + libovsdbclient "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/vswitchd" +) + +type interfacePredicate func(*vswitchd.Interface) bool + +// ListInterfaces looks up all ovs interfaces from the cache +func ListInterfaces(ovsClient libovsdbclient.Client) ([]*vswitchd.Interface, error) { + ctx, cancel := context.WithTimeout(context.Background(), types.OVSDBTimeout) + defer cancel() + searchedInterfaces := []*vswitchd.Interface{} + err := ovsClient.List(ctx, &searchedInterfaces) + return searchedInterfaces, err +} + +// FindInterfacesWithPredicate returns all the ovs interfaces in the cache +// that matches the lookup function +func FindInterfacesWithPredicate(ovsClient libovsdbclient.Client, p interfacePredicate) ([]*vswitchd.Interface, error) { + ctx, cancel := context.WithTimeout(context.Background(), types.OVSDBTimeout) + defer cancel() + searchedInterfaces := []*vswitchd.Interface{} + + err := ovsClient.WhereCache(p).List(ctx, &searchedInterfaces) + return searchedInterfaces, err +} diff --git a/go-controller/pkg/libovsdb/ops/ovs/openvswitch.go b/go-controller/pkg/libovsdb/ops/ovs/openvswitch.go new file mode 100644 index 0000000000..71c58766f1 --- /dev/null +++ b/go-controller/pkg/libovsdb/ops/ovs/openvswitch.go @@ -0,0 +1,25 @@ +package ovs + +import ( + "context" + "fmt" + libovsdbclient "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/vswitchd" +) + +// Get OpenvSwitch entry from the cache +func GetOpenvSwitch(ovsClient libovsdbclient.Client) (*vswitchd.OpenvSwitch, error) { + ctx, cancel := context.WithTimeout(context.Background(), types.OVSDBTimeout) + defer cancel() + openvSwitchList := []*vswitchd.OpenvSwitch{} + err := ovsClient.List(ctx, &openvSwitchList) + if err != nil { + return nil, err + } + if len(openvSwitchList) == 0 { + return nil, fmt.Errorf("no openvSwitch entry found") + } + + return openvSwitchList[0], err +} diff --git a/go-controller/pkg/libovsdb/ops/router.go b/go-controller/pkg/libovsdb/ops/router.go index 0eb8499abe..46eb6b5a82 100644 --- a/go-controller/pkg/libovsdb/ops/router.go +++ b/go-controller/pkg/libovsdb/ops/router.go @@ -8,9 +8,10 @@ import ( libovsdbclient 
"github.com/ovn-org/libovsdb/client" libovsdb "github.com/ovn-org/libovsdb/ovsdb" + "k8s.io/apimachinery/pkg/util/sets" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" - "k8s.io/apimachinery/pkg/util/sets" ) // ROUTER OPs @@ -256,6 +257,27 @@ func FindLogicalRouterPoliciesWithPredicate(nbClient libovsdbclient.Client, p lo return found, err } +// FindALogicalRouterPoliciesWithPredicate looks up a logical router policies from +// the cache based on a given predicate +func FindALogicalRouterPoliciesWithPredicate(nbClient libovsdbclient.Client, routerName string, p logicalRouterPolicyPredicate) ([]*nbdb.LogicalRouterPolicy, error) { + lr := &nbdb.LogicalRouter{Name: routerName} + router, err := GetLogicalRouter(nbClient, lr) + if err != nil { + return nil, err + } + + newPredicate := func(item *nbdb.LogicalRouterPolicy) bool { + for _, policyUUID := range router.Policies { + if policyUUID == item.UUID && p(item) { + return true + } + } + return false + } + + return FindLogicalRouterPoliciesWithPredicate(nbClient, newPredicate) +} + // GetLogicalRouterPolicy looks up a logical router policy from the cache func GetLogicalRouterPolicy(nbClient libovsdbclient.Client, policy *nbdb.LogicalRouterPolicy) (*nbdb.LogicalRouterPolicy, error) { found := []*nbdb.LogicalRouterPolicy{} @@ -644,7 +666,7 @@ func CreateOrReplaceLogicalRouterStaticRouteWithPredicate(nbClient libovsdbclien lr := &nbdb.LogicalRouter{Name: routerName} router, err := GetLogicalRouter(nbClient, lr) if err != nil { - return err + return fmt.Errorf("unable to get logical router %s: %w", routerName, err) } newPredicate := func(item *nbdb.LogicalRouterStaticRoute) bool { for _, routeUUID := range router.StaticRoutes { @@ -656,7 +678,7 @@ func CreateOrReplaceLogicalRouterStaticRouteWithPredicate(nbClient libovsdbclien } routes, err := FindLogicalRouterStaticRoutesWithPredicate(nbClient, newPredicate) if err != nil { - return err + return fmt.Errorf("unable to get logical router static routes with predicate on router %s: %w", routerName, err) } var ops []libovsdb.Operation @@ -697,7 +719,7 @@ func CreateOrReplaceLogicalRouterStaticRouteWithPredicate(nbClient libovsdbclien ops, err = CreateOrUpdateLogicalRouterStaticRoutesWithPredicateOps(nbClient, ops, routerName, lrsr, nil, fields...) 
if err != nil { - return err + return fmt.Errorf("unable to create or update logical router static routes on router %s: %w", routerName, err) } _, err = TransactAndCheck(nbClient, ops) return err @@ -886,6 +908,7 @@ func buildNAT( logicalPort string, externalMac string, externalIDs map[string]string, + match string, ) *nbdb.NAT { nat := &nbdb.NAT{ Type: natType, @@ -893,6 +916,7 @@ LogicalIP: logicalIP, Options: map[string]string{"stateless": "false"}, ExternalIDs: externalIDs, + Match: match, } if logicalPort != "" { @@ -912,6 +936,16 @@ func BuildSNAT( logicalIP *net.IPNet, logicalPort string, externalIDs map[string]string, +) *nbdb.NAT { + return BuildSNATWithMatch(externalIP, logicalIP, logicalPort, externalIDs, "") +} + +func BuildSNATWithMatch( + externalIP *net.IP, + logicalIP *net.IPNet, + logicalPort string, + externalIDs map[string]string, + match string, ) *nbdb.NAT { externalIPStr := "" if externalIP != nil { @@ -923,7 +957,7 @@ if logicalIPMask != 32 && logicalIPMask != 128 { logicalIPStr = logicalIP.String() } - return buildNAT(nbdb.NATTypeSNAT, externalIPStr, logicalIPStr, logicalPort, "", externalIDs) + return buildNAT(nbdb.NATTypeSNAT, externalIPStr, logicalIPStr, logicalPort, "", externalIDs, match) } // BuildDNATAndSNAT builds a logical router DNAT/SNAT @@ -933,6 +967,17 @@ func BuildDNATAndSNAT( logicalPort string, externalMac string, externalIDs map[string]string, +) *nbdb.NAT { + return BuildDNATAndSNATWithMatch(externalIP, logicalIP, logicalPort, externalMac, externalIDs, "") +} + +func BuildDNATAndSNATWithMatch( + externalIP *net.IP, + logicalIP *net.IPNet, + logicalPort string, + externalMac string, + externalIDs map[string]string, + match string, ) *nbdb.NAT { externalIPStr := "" if externalIP != nil { @@ -948,13 +993,18 @@ logicalIPStr, logicalPort, externalMac, - externalIDs) + externalIDs, + match) } -// isEquivalentNAT if it has same uuid. Otherwise, check if types match. -// ExternalIP must be unique amonst non-SNATs; -// LogicalIP must be unique amonst SNATs; -// If provided, LogicalPort is expected to match; +// isEquivalentNAT checks if the `searched` NAT is equivalent to `existing`. +// Returns true if the UUID is set in `searched` and matches the UUID of `existing`. +// Otherwise, perform the following checks: +// - Compare the Type and Match fields. +// - Compare ExternalIP if it is set in `searched`. +// - Compare LogicalIP if the Type in `searched` is SNAT. +// - Compare LogicalPort if it is set in `searched`. +// - Ensure that all ExternalIDs of `searched` exist and have the same value in `existing`. func isEquivalentNAT(existing *nbdb.NAT, searched *nbdb.NAT) bool { // Simple case: uuid was provided. if searched.UUID != "" && existing.UUID == searched.UUID { @@ -965,6 +1015,10 @@ func isEquivalentNAT(existing *nbdb.NAT, searched *nbdb.NAT) bool { return false } + if searched.Match != existing.Match { + return false + } + // Compre externalIP if its not empty. if searched.ExternalIP != "" && searched.ExternalIP != existing.ExternalIP { return false @@ -1187,3 +1241,22 @@ func DeleteNATsWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.O m := newModelClient(nbClient) return m.DeleteOps(ops, opModels...)
} + +func UpdateNATOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, nats ...*nbdb.NAT) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(nats)) + for i := range nats { + nat := nats[i] + opModel := []operationModel{ + { + Model: nat, + OnModelUpdates: onModelUpdatesAllNonDefault(), + ErrNotFound: true, + BulkOp: false, + }, + } + opModels = append(opModels, opModel...) + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModels...) +} diff --git a/go-controller/pkg/libovsdb/ops/sample.go b/go-controller/pkg/libovsdb/ops/sample.go new file mode 100644 index 0000000000..7f4f527d1b --- /dev/null +++ b/go-controller/pkg/libovsdb/ops/sample.go @@ -0,0 +1,221 @@ +package ops + +import ( + "golang.org/x/net/context" + "hash/fnv" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/model" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +func CreateOrUpdateSampleCollector(nbClient libovsdbclient.Client, collector *nbdb.SampleCollector) error { + opModel := operationModel{ + Model: collector, + OnModelUpdates: onModelUpdatesAllNonDefault(), + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + _, err := m.CreateOrUpdate(opModel) + return err +} + +func UpdateSampleCollectorExternalIDs(nbClient libovsdbclient.Client, collector *nbdb.SampleCollector) error { + opModel := operationModel{ + Model: collector, + OnModelUpdates: []interface{}{&collector.ExternalIDs}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + _, err := m.CreateOrUpdate(opModel) + return err +} + +func DeleteSampleCollector(nbClient libovsdbclient.Client, collector *nbdb.SampleCollector) error { + opModel := operationModel{ + Model: collector, + ErrNotFound: false, + BulkOp: false, + } + m := newModelClient(nbClient) + return m.Delete(opModel) +} + +func DeleteSampleCollectorWithPredicate(nbClient libovsdbclient.Client, p func(collector *nbdb.SampleCollector) bool) error { + opModel := operationModel{ + Model: &nbdb.SampleCollector{}, + ModelPredicate: p, + ErrNotFound: false, + BulkOp: true, + } + m := newModelClient(nbClient) + return m.Delete(opModel) +} + +func FindSampleCollectorWithPredicate(nbClient libovsdbclient.Client, p func(*nbdb.SampleCollector) bool) ([]*nbdb.SampleCollector, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + collectors := []*nbdb.SampleCollector{} + err := nbClient.WhereCache(p).List(ctx, &collectors) + return collectors, err +} + +func ListSampleCollectors(nbClient libovsdbclient.Client) ([]*nbdb.SampleCollector, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + collectors := []*nbdb.SampleCollector{} + err := nbClient.List(ctx, &collectors) + return collectors, err +} + +func CreateOrUpdateSamplingAppsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, samplingApps ...*nbdb.SamplingApp) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(samplingApps)) + for i := range samplingApps { + // can't use i in the predicate, for loop replaces it in-memory + samplingApp := samplingApps[i] + opModel := operationModel{ + Model: samplingApp, + OnModelUpdates: onModelUpdatesAllNonDefault(), + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } 
+ + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels...) +} + +func DeleteSamplingAppsWithPredicate(nbClient libovsdbclient.Client, p func(collector *nbdb.SamplingApp) bool) error { + opModel := operationModel{ + Model: &nbdb.SamplingApp{}, + ModelPredicate: p, + ErrNotFound: false, + BulkOp: true, + } + m := newModelClient(nbClient) + return m.Delete(opModel) +} + +func FindSample(nbClient libovsdbclient.Client, sampleMetadata int) (*nbdb.Sample, error) { + sample := &nbdb.Sample{ + Metadata: sampleMetadata, + } + return GetSample(nbClient, sample) +} + +func GetSample(nbClient libovsdbclient.Client, sample *nbdb.Sample) (*nbdb.Sample, error) { + found := []*nbdb.Sample{} + opModel := operationModel{ + Model: sample, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + modelClient := newModelClient(nbClient) + err := modelClient.Lookup(opModel) + if err != nil { + return nil, err + } + return found[0], err +} + +type SampleFeature = string + +const ( + EgressFirewallSample SampleFeature = "EgressFirewall" + NetworkPolicySample SampleFeature = "NetworkPolicy" + AdminNetworkPolicySample SampleFeature = "AdminNetworkPolicy" + MulticastSample SampleFeature = "Multicast" + UDNIsolationSample SampleFeature = "UDNIsolation" +) + +// SamplingConfig is used to configure sampling for different db objects. +type SamplingConfig struct { + featureCollectors map[SampleFeature][]string +} + +func NewSamplingConfig(featureCollectors map[SampleFeature][]string) *SamplingConfig { + return &SamplingConfig{ + featureCollectors: featureCollectors, + } +} + +func addSample(c *SamplingConfig, opModels []operationModel, model model.Model) []operationModel { + switch t := model.(type) { + case *nbdb.ACL: + return createOrUpdateSampleForACL(opModels, c, t) + } + return opModels +} + +// createOrUpdateSampleForACL should be called before acl operationModel is appended to opModels. +func createOrUpdateSampleForACL(opModels []operationModel, c *SamplingConfig, acl *nbdb.ACL) []operationModel { + if c == nil { + acl.SampleEst = nil + acl.SampleNew = nil + return opModels + } + collectors := c.featureCollectors[getACLSampleFeature(acl)] + if len(collectors) == 0 { + acl.SampleEst = nil + acl.SampleNew = nil + return opModels + } + aclID := GetACLSampleID(acl) + sample := &nbdb.Sample{ + Collectors: collectors, + // 32 bits + Metadata: int(aclID), + } + opModel := operationModel{ + Model: sample, + DoAfter: func() { + acl.SampleEst = &sample.UUID + acl.SampleNew = &sample.UUID + }, + OnModelUpdates: []interface{}{&sample.Collectors}, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + return opModels +} + +func GetACLSampleID(acl *nbdb.ACL) uint32 { + // primaryID is unique for each ACL, but established connections will keep sampleID that is set on + // connection creation. Here is the situation we want to avoid: + // 1. ACL1 is created with sampleID=1 (e.g. based on ANP namespace+name+...+rule index with action Allow) + // 2. connection A is established with sampleID=1, sample is decoded to say "Allowed by ANP namespace+name" + // 3. ACL1 is updated with sampleID=1 (e.g. now same rule in ANP says Deny, but PrimaryIDKey is the same) + // 4. connection A still generates samples with sampleID=1, but now it is "Denied by ANP namespace+name" + // In reality, connection A is still allowed, as existing connections are not affected by ANP updates. 
+ // To avoid this, we encode Match and Action to the sampleID, to ensure a new sampleID is assigned on Match or action change. + // In that case stale sampleIDs will just report messages like "sampling for this connection was updated or deleted". + primaryID := acl.ExternalIDs[PrimaryIDKey.String()] + acl.Match + acl.Action + h := fnv.New32a() + h.Write([]byte(primaryID)) + return h.Sum32() +} + +func getACLSampleFeature(acl *nbdb.ACL) SampleFeature { + switch acl.ExternalIDs[OwnerTypeKey.String()] { + case AdminNetworkPolicyOwnerType, BaselineAdminNetworkPolicyOwnerType: + return AdminNetworkPolicySample + case MulticastNamespaceOwnerType, MulticastClusterOwnerType: + return MulticastSample + case NetpolNodeOwnerType, NetworkPolicyOwnerType, NetpolNamespaceOwnerType: + return NetworkPolicySample + case EgressFirewallOwnerType: + return EgressFirewallSample + case UDNIsolationOwnerType: + return UDNIsolationSample + } + return "" +} diff --git a/go-controller/pkg/libovsdb/ops/switch.go b/go-controller/pkg/libovsdb/ops/switch.go index fbee34b55a..250868667b 100644 --- a/go-controller/pkg/libovsdb/ops/switch.go +++ b/go-controller/pkg/libovsdb/ops/switch.go @@ -4,9 +4,9 @@ import ( "context" "errors" "fmt" - libovsdbclient "github.com/ovn-org/libovsdb/client" libovsdb "github.com/ovn-org/libovsdb/ovsdb" + ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" @@ -15,6 +15,17 @@ import ( // LOGICAL_SWITCH OPs type switchPredicate func(*nbdb.LogicalSwitch) bool +type switchPortPredicate func(port *nbdb.LogicalSwitchPort) bool + +// FindLogicalSwitchPortWithPredicate looks up logical switches ports from the cache +// based on a given predicate +func FindLogicalSwitchPortWithPredicate(nbClient libovsdbclient.Client, p switchPortPredicate) ([]*nbdb.LogicalSwitchPort, error) { + found := []*nbdb.LogicalSwitchPort{} + ctx, cancel := context.WithTimeout(context.Background(), ovntypes.OVSDBTimeout) + defer cancel() + err := nbClient.WhereCache(p).List(ctx, &found) + return found, err +} // FindLogicalSwitchesWithPredicate looks up logical switches from the cache // based on a given predicate @@ -286,37 +297,54 @@ func GetLogicalSwitchPort(nbClient libovsdbclient.Client, lsp *nbdb.LogicalSwitc return found[0], nil } -func createOrUpdateLogicalSwitchPortsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, createSwitch bool, lsps ...*nbdb.LogicalSwitchPort) ([]libovsdb.Operation, error) { +func createOrUpdateLogicalSwitchPortOpModelWithCustomFields(sw *nbdb.LogicalSwitch, lsp *nbdb.LogicalSwitchPort, createLSP bool, customFields []ModelUpdateField) operationModel { + var fieldInterfaces []interface{} + if len(customFields) != 0 { + fieldInterfaces = getFieldsToUpdate(lsp, customFields) + } else { + fieldInterfaces = getAllUpdatableFields(lsp) + } + return operationModel{ + Model: lsp, + OnModelUpdates: fieldInterfaces, + DoAfter: func() { + // lsp.UUID should be set here + sw.Ports = append(sw.Ports, lsp.UUID) + }, + ErrNotFound: !createLSP, + BulkOp: false, + } +} + +func createOrUpdateLogicalSwitchPortsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, createSwitch, createLSP bool, customFields []ModelUpdateField, lsps ...*nbdb.LogicalSwitchPort) ([]libovsdb.Operation, error) { originalPorts := sw.Ports sw.Ports = make([]string, 0, len(lsps)) opModels := make([]operationModel, 0, len(lsps)+1) - for i 
:= range lsps { - lsp := lsps[i] - opModel := operationModel{ - Model: lsp, - OnModelUpdates: getAllUpdatableFields(lsp), - DoAfter: func() { sw.Ports = append(sw.Ports, lsp.UUID) }, - ErrNotFound: false, - BulkOp: false, - } + + for _, lsp := range lsps { + opModel := createOrUpdateLogicalSwitchPortOpModelWithCustomFields(sw, lsp, createLSP, customFields) opModels = append(opModels, opModel) } - opModel := operationModel{ + + opModelSwitch := operationModel{ Model: sw, OnModelMutations: []interface{}{&sw.Ports}, ErrNotFound: !createSwitch, BulkOp: false, } - opModels = append(opModels, opModel) + opModels = append(opModels, opModelSwitch) m := newModelClient(nbClient) ops, err := m.CreateOrUpdateOps(ops, opModels...) sw.Ports = originalPorts + if err != nil && errors.Is(err, libovsdbclient.ErrNotFound) && !createSwitch { + err = fmt.Errorf("could not find switch: %q, %w", sw.Name, err) + } return ops, err } func createOrUpdateLogicalSwitchPorts(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch, createSwitch bool, lsps ...*nbdb.LogicalSwitchPort) error { - ops, err := createOrUpdateLogicalSwitchPortsOps(nbClient, nil, sw, createSwitch, lsps...) + ops, err := createOrUpdateLogicalSwitchPortsOps(nbClient, nil, sw, createSwitch, true, nil, lsps...) if err != nil { return err } @@ -325,11 +353,18 @@ func createOrUpdateLogicalSwitchPorts(nbClient libovsdbclient.Client, sw *nbdb.L return err } -// CreateOrUpdateLogicalSwitchPortsOnSwitchOps creates or updates the provided +// CreateOrUpdateLogicalSwitchPortsOnSwitchWithCustomFieldsOps creates or updates the provided +// logical switch ports, adds them to the provided logical switch and returns +// the corresponding ops +func CreateOrUpdateLogicalSwitchPortsOnSwitchWithCustomFieldsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, customFields []ModelUpdateField, lsps ...*nbdb.LogicalSwitchPort) ([]libovsdb.Operation, error) { + return createOrUpdateLogicalSwitchPortsOps(nbClient, ops, sw, false, true, customFields, lsps...) +} + +// UpdateLogicalSwitchPortsOnSwitchWithCustomFieldsOps updates the provided // logical switch ports, adds them to the provided logical switch and returns // the corresponding ops -func CreateOrUpdateLogicalSwitchPortsOnSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, lsps ...*nbdb.LogicalSwitchPort) ([]libovsdb.Operation, error) { - return createOrUpdateLogicalSwitchPortsOps(nbClient, ops, sw, false, lsps...) +func UpdateLogicalSwitchPortsOnSwitchWithCustomFieldsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, customFields []ModelUpdateField, lsps ...*nbdb.LogicalSwitchPort) ([]libovsdb.Operation, error) { + return createOrUpdateLogicalSwitchPortsOps(nbClient, ops, sw, false, false, customFields, lsps...) 
} // CreateOrUpdateLogicalSwitchPortsOnSwitch creates or updates the provided diff --git a/go-controller/pkg/libovsdb/util/router.go b/go-controller/pkg/libovsdb/util/router.go index 2036ec34f8..894ae6f039 100644 --- a/go-controller/pkg/libovsdb/util/router.go +++ b/go-controller/pkg/libovsdb/util/router.go @@ -3,12 +3,15 @@ package util import ( "fmt" "net" + "strings" libovsdbclient "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + "k8s.io/klog/v2" utilnet "k8s.io/utils/net" ) @@ -29,30 +32,50 @@ import ( // (TODO: FIXME): With this route, we are officially breaking support for IC with zones that have multiple-nodes // NOTE: This route is exactly the same as what is added by pod-live-migration feature and we keep the route exactly // same across the 3 features so that if the route already exists on the node, this is just a no-op -func CreateDefaultRouteToExternal(nbClient libovsdbclient.Client, clusterRouter, gwRouterName string) error { +func CreateDefaultRouteToExternal(nbClient libovsdbclient.Client, clusterRouter, gwRouterName string, clusterSubnets []config.CIDRNetworkEntry) error { gatewayIPs, err := GetLRPAddrs(nbClient, types.GWRouterToJoinSwitchPrefix+gwRouterName) if err != nil { return fmt.Errorf("attempt at finding node gateway router %s network information failed, err: %w", gwRouterName, err) } - clusterSubnets := util.GetAllClusterSubnets() - for _, subnet := range clusterSubnets { - gatewayIP, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6String(subnet.IP.String()), gatewayIPs) + for _, clusterSubnet := range clusterSubnets { + isClusterSubnetIPV6 := utilnet.IsIPv6String(clusterSubnet.CIDR.IP.String()) + gatewayIP, err := util.MatchFirstIPNetFamily(isClusterSubnetIPV6, gatewayIPs) if err != nil { return fmt.Errorf("could not find gateway IP for gateway router %s with family %v: %v", gwRouterName, isClusterSubnetIPV6, err) } lrsr := nbdb.LogicalRouterStaticRoute{ - IPPrefix: subnet.String(), + IPPrefix: clusterSubnet.CIDR.String(), Nexthop: gatewayIP.IP.String(), Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, } + + clusterSubnetPrefixLen, _ := clusterSubnet.CIDR.Mask.Size() p := func(lrsr *nbdb.LogicalRouterStaticRoute) bool { + // Replace any existing LRSR for the cluster subnet. + // Make sure you don't wipe out the existing LRSR via mp0 for the local node subnet + // (e.g. 10.244.1.0/24 10.244.1.2 src-ip) and take into account cluster subnet expansion, + // which requires the IP address part of the subnet to stay the same and only allows + // the mask length to be decreased (e.g. from 10.244.0.0/16 to 10.244.0.0/15, as long + // as 10.244.0.0 stays the same).
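[Editor's note: the predicate comment above packs three constraints into prose. Before the predicate body, here is a standalone sketch of the same matching rule, using a hypothetical helper name and only the Go standard library; it is not the repository's code.]

```go
package main

import (
	"fmt"
	"net"
)

// matchesClusterRoute restates the rule from the comment above: an existing
// route prefix is a (possibly pre-expansion) cluster-subnet route iff its
// network address equals the cluster subnet's (expansion never changes it),
// its mask is at least as long as the current cluster mask (expansion only
// shortens masks), and it is wider than a per-node host subnet, so the
// node-subnet route via mp0 is never matched.
func matchesClusterRoute(clusterCIDR *net.IPNet, hostSubnetLen int, routePrefix string) bool {
	_, itemCIDR, err := net.ParseCIDR(routePrefix)
	if err != nil {
		return false
	}
	clusterLen, _ := clusterCIDR.Mask.Size()
	itemLen, _ := itemCIDR.Mask.Size()
	return clusterCIDR.IP.Equal(itemCIDR.IP) && // network address never changes on expansion
		clusterLen <= itemLen && // mask length may only decrease over time
		itemLen < hostSubnetLen // exclude the local node subnet route
}

func main() {
	_, cluster, _ := net.ParseCIDR("10.244.0.0/15") // expanded from 10.244.0.0/16
	fmt.Println(matchesClusterRoute(cluster, 24, "10.244.0.0/16")) // true: stale pre-expansion route, replace it
	fmt.Println(matchesClusterRoute(cluster, 24, "10.244.1.0/24")) // false: local node subnet route, keep it
}
```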
+ if utilnet.IsIPv6String(lrsr.Nexthop) != isClusterSubnetIPV6 { + return false + } + if !strings.Contains(lrsr.IPPrefix, "/") { + // skip /32 (v4) or /128 (v6) routes, not rendered with prefix length in OVN + return false + } _, itemCIDR, err := net.ParseCIDR(lrsr.IPPrefix) if err != nil { + klog.Errorf("Failed to parse CIDR %s of lrsr %+v: %v", lrsr.IPPrefix, lrsr, err) return false } - return util.ContainsCIDR(subnet, itemCIDR) && - lrsr.Nexthop == gatewayIP.IP.String() && + itemPrefixLen, _ := itemCIDR.Mask.Size() + + return clusterSubnet.CIDR.IP.Equal(itemCIDR.IP) && // even after expansion, cluster network address cannot change + clusterSubnetPrefixLen <= itemPrefixLen && // cluster subnet mask len can only be decreased + itemPrefixLen < clusterSubnet.HostSubnetLength && // don't match the local node subnet route lrsr.Policy != nil && *lrsr.Policy == nbdb.LogicalRouterStaticRoutePolicySrcIP + } if err := libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(nbClient, clusterRouter, &lrsr, p); err != nil { return fmt.Errorf("unable to create pod to external catch-all reroute for gateway router %s, err: %v", gwRouterName, err) diff --git a/go-controller/pkg/libovsdb/util/router_test.go b/go-controller/pkg/libovsdb/util/router_test.go new file mode 100644 index 0000000000..2324f1a7de --- /dev/null +++ b/go-controller/pkg/libovsdb/util/router_test.go @@ -0,0 +1,237 @@ +package util + +import ( + "fmt" + "net" + "testing" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" +) + +func TestCreateDefaultRouteToExternal(t *testing.T) { + + config.PrepareTestConfig() + nodeName := "ovn-worker" + + _, clusterSubnetV4, _ := net.ParseCIDR("10.128.0.0/16") + _, clusterSubnetV6, _ := net.ParseCIDR("fe00::/48") + config.Default.ClusterSubnets = []config.CIDRNetworkEntry{{clusterSubnetV4, 24}, {clusterSubnetV6, 64}} + + ovnClusterRouterName := types.OVNClusterRouter + gwRouterName := types.GWRouterPrefix + nodeName + gwRouterPortName := types.GWRouterToJoinSwitchPrefix + gwRouterName + gwRouterIPAddressV4 := "100.64.0.3" + gwRouterIPAddressV6 := "fd98::3" + gwRouterPort := &nbdb.LogicalRouterPort{ + UUID: gwRouterPortName + "-uuid", + Name: gwRouterPortName, + Networks: []string{gwRouterIPAddressV4 + "/16", gwRouterIPAddressV6 + "/64"}, + } + + clusterSubnetRouteV4 := &nbdb.LogicalRouterStaticRoute{ + IPPrefix: clusterSubnetV4.String(), + Nexthop: gwRouterIPAddressV4, + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + UUID: "cluster-subnet-route-v4-uuid", + } + clusterSubnetRouteV6 := &nbdb.LogicalRouterStaticRoute{ + IPPrefix: clusterSubnetV6.String(), + Nexthop: gwRouterIPAddressV6, + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + UUID: "cluster-subnet-route-v6-uuid", + } + + // to test that we won't erase the old cluster subnet route that had a different next hop + wrongNextHopClusterSubnetRouteV4 := &nbdb.LogicalRouterStaticRoute{ + IPPrefix: clusterSubnetV4.String(), + Nexthop: "100.64.0.33", + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + UUID: "old-cluster-subnet-route-v4-uuid", + } + wrongNextHopClusterSubnetRouteV6 := &nbdb.LogicalRouterStaticRoute{ + IPPrefix: clusterSubnetV6.String(), + Nexthop: "fd98::33", + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + UUID: "old-cluster-subnet-route-v6-uuid", + } + + // to test that we won't erase the 
existing route for the node subnet via mp0 + _, nodeSubnetV4, _ := net.ParseCIDR("10.128.0.0/24") + _, nodeSubnetV6, _ := net.ParseCIDR("fe00::/64") + mp0IPAddressV4 := "100.244.0.2" + mp0IPAddressV6 := "fe00::2" + + nodeSubnetRouteV4 := &nbdb.LogicalRouterStaticRoute{ + IPPrefix: nodeSubnetV4.String(), + Nexthop: mp0IPAddressV4, + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + UUID: "node-subnet-route-v4-uuid", + } + nodeSubnetRouteV6 := &nbdb.LogicalRouterStaticRoute{ + IPPrefix: nodeSubnetV6.String(), + Nexthop: mp0IPAddressV6, + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + UUID: "node-subnet-route-v6-uuid", + } + + // to test that, after cluster subnet expansion, we replace the old cluster subnet route with the new one + _, newClusterSubnetV4, _ := net.ParseCIDR("10.128.0.0/15") + _, newClusterSubnetV6, _ := net.ParseCIDR("fe00::/46") + newClusterSubnetRouteAfterExpansionV4 := &nbdb.LogicalRouterStaticRoute{ + IPPrefix: newClusterSubnetV4.String(), + Nexthop: gwRouterIPAddressV4, + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + UUID: "new-cluster-subnet-route-v4-uuid", + } + newClusterSubnetRouteAfterExpansionV6 := &nbdb.LogicalRouterStaticRoute{ + IPPrefix: newClusterSubnetV6.String(), + Nexthop: gwRouterIPAddressV6, + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + UUID: "new-cluster-subnet-route-v6-uuid", + } + + gatewayRouter := &nbdb.LogicalRouter{ + Name: gwRouterName, + UUID: gwRouterName + "-uuid", + Ports: []string{gwRouterPort.UUID}, + } + + tests := []struct { + desc string + initialNbdb libovsdbtest.TestSetup + expectedNbdb libovsdbtest.TestSetup + preTestAction func() + }{ + { + desc: "Add a cluster subnet route to GW router when no cluster subnet route exists", + initialNbdb: libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + &nbdb.LogicalRouter{ + Name: ovnClusterRouterName, + UUID: ovnClusterRouterName + "-uuid", + StaticRoutes: []string{nodeSubnetRouteV4.UUID, nodeSubnetRouteV6.UUID}, // should not be replaced + }, + gatewayRouter, + gwRouterPort, + nodeSubnetRouteV4, + nodeSubnetRouteV6, + }, + }, + expectedNbdb: libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + &nbdb.LogicalRouter{ + Name: ovnClusterRouterName, + UUID: ovnClusterRouterName + "-uuid", + StaticRoutes: []string{nodeSubnetRouteV4.UUID, nodeSubnetRouteV6.UUID, clusterSubnetRouteV4.UUID, clusterSubnetRouteV6.UUID}, + }, + gatewayRouter, + gwRouterPort, + nodeSubnetRouteV4, + nodeSubnetRouteV6, + clusterSubnetRouteV4, + clusterSubnetRouteV6, + }, + }, + }, + { + desc: "Add a cluster subnet route to GW router when a cluster subnet route already exists", // should replace the existing one + initialNbdb: libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + wrongNextHopClusterSubnetRouteV4, + wrongNextHopClusterSubnetRouteV6, + &nbdb.LogicalRouter{ + Name: ovnClusterRouterName, + UUID: ovnClusterRouterName + "-uuid", + StaticRoutes: []string{nodeSubnetRouteV4.UUID, nodeSubnetRouteV6.UUID, wrongNextHopClusterSubnetRouteV4.UUID, wrongNextHopClusterSubnetRouteV6.UUID}, + }, + gatewayRouter, + gwRouterPort, + nodeSubnetRouteV4, + nodeSubnetRouteV6, + }, + }, + expectedNbdb: libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + &nbdb.LogicalRouter{ + Name: ovnClusterRouterName, + UUID: ovnClusterRouterName + "-uuid", + StaticRoutes: []string{nodeSubnetRouteV4.UUID, nodeSubnetRouteV6.UUID, clusterSubnetRouteV4.UUID, clusterSubnetRouteV6.UUID}, + }, + gatewayRouter, + gwRouterPort, + nodeSubnetRouteV4, + nodeSubnetRouteV6, + clusterSubnetRouteV4, + 
clusterSubnetRouteV6, + }, + }, + }, + { + desc: "Update the cluster subnet route to GW router after cluster subnet expansion", // should replace the existing one + initialNbdb: libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + clusterSubnetRouteV4, + clusterSubnetRouteV6, + &nbdb.LogicalRouter{ + Name: ovnClusterRouterName, + UUID: ovnClusterRouterName + "-uuid", + StaticRoutes: []string{nodeSubnetRouteV4.UUID, nodeSubnetRouteV6.UUID, clusterSubnetRouteV4.UUID, clusterSubnetRouteV6.UUID}, + }, + gatewayRouter, + gwRouterPort, + nodeSubnetRouteV4, + nodeSubnetRouteV6, + }, + }, + preTestAction: func() { + // Apply the new cluster subnets + config.Default.ClusterSubnets = []config.CIDRNetworkEntry{{newClusterSubnetV4, 24}, {newClusterSubnetV6, 64}} + + }, + expectedNbdb: libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + &nbdb.LogicalRouter{ + Name: ovnClusterRouterName, + UUID: ovnClusterRouterName + "-uuid", + StaticRoutes: []string{nodeSubnetRouteV4.UUID, nodeSubnetRouteV6.UUID, newClusterSubnetRouteAfterExpansionV4.UUID, newClusterSubnetRouteAfterExpansionV6.UUID}, + }, + gatewayRouter, + gwRouterPort, + nodeSubnetRouteV4, + nodeSubnetRouteV6, + newClusterSubnetRouteAfterExpansionV4, + newClusterSubnetRouteAfterExpansionV6, + }, + }, + }, + } + for i, tc := range tests { + t.Run(fmt.Sprintf("%d:%s", i, tc.desc), func(t *testing.T) { + nbClient, cleanup, err := libovsdbtest.NewNBTestHarness(tc.initialNbdb, nil) + if err != nil { + t.Fatal(fmt.Errorf("test: \"%s\" failed to create test harness: %v", tc.desc, err)) + } + t.Cleanup(cleanup.Cleanup) + + if tc.preTestAction != nil { + tc.preTestAction() + } + + if err = CreateDefaultRouteToExternal(nbClient, ovnClusterRouterName, gwRouterName, config.Default.ClusterSubnets); err != nil { + t.Fatal(fmt.Errorf("failed to run CreateDefaultRouteToExternal: %v", err)) + } + + matcher := libovsdbtest.HaveDataIgnoringUUIDs(tc.expectedNbdb.NBData) + success, err := matcher.Match(nbClient) + if !success { + t.Fatal(fmt.Errorf("test: \"%s\" didn't match expected with actual, err: %v", tc.desc, matcher.FailureMessage(nbClient))) + } + if err != nil { + t.Fatal(fmt.Errorf("test: \"%s\" encountered error: %v", tc.desc, err)) + } + }) + } +} diff --git a/go-controller/pkg/metrics/metrics.go b/go-controller/pkg/metrics/metrics.go index 6d4d86c8b6..d26185acd5 100644 --- a/go-controller/pkg/metrics/metrics.go +++ b/go-controller/pkg/metrics/metrics.go @@ -15,6 +15,7 @@ import ( "sync" "time" + libovsdbclient "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/prometheus/client_golang/prometheus" @@ -560,8 +561,9 @@ func startMetricsServer(bindAddress, certFile, keyFile string, handler http.Hand }() } -func RegisterOvnMetrics(clientset kubernetes.Interface, k8sNodeName string, stopChan <-chan struct{}) { +func RegisterOvnMetrics(clientset kubernetes.Interface, k8sNodeName string, ovsDBClient libovsdbclient.Client, + metricsScrapeInterval int, stopChan <-chan struct{}) { go RegisterOvnDBMetrics(clientset, k8sNodeName, stopChan) - go RegisterOvnControllerMetrics(stopChan) + go RegisterOvnControllerMetrics(ovsDBClient, metricsScrapeInterval, stopChan) go RegisterOvnNorthdMetrics(clientset, k8sNodeName, stopChan) } diff --git a/go-controller/pkg/metrics/metrics_suite_test.go b/go-controller/pkg/metrics/metrics_suite_test.go index 77130f463f..5b44ddaaa2 100644 --- a/go-controller/pkg/metrics/metrics_suite_test.go +++ 
b/go-controller/pkg/metrics/metrics_suite_test.go @@ -3,7 +3,7 @@ package metrics import ( "testing" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/metrics/ovn.go b/go-controller/pkg/metrics/ovn.go index ca80e5b69b..23556049cc 100644 --- a/go-controller/pkg/metrics/ovn.go +++ b/go-controller/pkg/metrics/ovn.go @@ -7,7 +7,10 @@ import ( "k8s.io/klog/v2" + libovsdbclient "github.com/ovn-org/libovsdb/client" + ovsops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/ovs" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/vswitchd" "github.com/prometheus/client_golang/prometheus" ) @@ -248,76 +251,63 @@ var ovnControllerStopwatchShowMetricsMap = map[string]*stopwatchMetricDetails{ // setOvnControllerConfigurationMetrics updates ovn-controller configuration // values (ovn-openflow-probe-interval, ovn-remote-probe-interval, ovn-monitor-all, -// ovn-encap-ip, ovn-encap-type, ovn-remote) through -// "ovs-vsctl list --columns=external_ids Open_vSwitch ." -func setOvnControllerConfigurationMetrics() (err error) { - var stdout, stderr string - - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("recovering from panic while parsing the "+ - "Open_vSwitch table's external_ids column - %v", r) - } - }() - - stdout, stderr, err = util.RunOVSVsctl("--no-headings", "--data=bare", - "--columns=external_ids", "list", "Open_vSwitch", ".") +// ovn-encap-ip, ovn-encap-type, ovn-remote) obtained from Open_vSwitch entry updates +func setOvnControllerConfigurationMetrics(ovsDBClient libovsdbclient.Client) (err error) { + openvSwitch, err := ovsops.GetOpenvSwitch(ovsDBClient) if err != nil { - return fmt.Errorf("failed to get Open_vSwitch table's external_ids column "+ - "stderr(%s) : %v", stderr, err) + return fmt.Errorf("failed to get Open_vSwitch entry (%v)", err) + } + + openflowProbeField := openvSwitch.ExternalIDs["ovn-openflow-probe-interval"] + openflowProbeVal := parseMetricToFloat(MetricOvnSubsystemController, "ovn-openflow-probe-interval", openflowProbeField) + metricOpenFlowProbeInterval.Set(openflowProbeVal) + + remoteProbeField := openvSwitch.ExternalIDs["ovn-remote-probe-interval"] + remoteProbeValue := parseMetricToFloat(MetricOvnSubsystemController, "ovn-remote-probe-interval", remoteProbeField) + metricRemoteProbeInterval.Set(remoteProbeValue / 1000) + + var ovnMonitorValue float64 + ovnMonitorField := openvSwitch.ExternalIDs["ovn-monitor-all"] + if ovnMonitorField == "true" { + ovnMonitorValue = 1 } + metricMonitorAll.Set(ovnMonitorValue) + + // To update not only values but also labels for metrics, we use Reset() to delete previous labels+value + encapIPValue := openvSwitch.ExternalIDs["ovn-encap-ip"] + metricEncapIP.Reset() + metricEncapIP.WithLabelValues(encapIPValue).Set(1) + + encapTypeValue := openvSwitch.ExternalIDs["ovn-encap-type"] + metricEncapType.Reset() + metricEncapType.WithLabelValues(encapTypeValue).Set(1) + + ovnRemoteValue := openvSwitch.ExternalIDs["ovn-remote"] + metricSbConnectionMethod.Reset() + metricSbConnectionMethod.WithLabelValues(ovnRemoteValue).Set(1) var ovnNodePortValue = 1 - for _, kvPair := range strings.Fields(stdout) { - elem := strings.Split(kvPair, "=") - if len(elem) != 2 { - continue - } - fieldType := elem[0] - fieldValue := elem[1] - switch fieldType { - case "ovn-openflow-probe-interval": - metricValue := parseMetricToFloat(MetricOvnSubsystemController, "ovn-openflow-probe-interval", fieldValue) 
- metricOpenFlowProbeInterval.Set(metricValue) - case "ovn-remote-probe-interval": - metricValue := parseMetricToFloat(MetricOvnSubsystemController, "ovn-remote-probe-interval", fieldValue) - metricRemoteProbeInterval.Set(metricValue / 1000) - case "ovn-monitor-all": - var ovnMonitorValue float64 - if fieldValue == "true" { - ovnMonitorValue = 1 - } - metricMonitorAll.Set(ovnMonitorValue) - case "ovn-encap-ip": - // To update not only values but also labels for metrics, we use Reset() to delete previous labels+value - metricEncapIP.Reset() - metricEncapIP.WithLabelValues(fieldValue).Set(1) - case "ovn-remote": - metricSbConnectionMethod.Reset() - metricSbConnectionMethod.WithLabelValues(fieldValue).Set(1) - case "ovn-encap-type": - metricEncapType.Reset() - metricEncapType.WithLabelValues(fieldValue).Set(1) - case "ovn-k8s-node-port": - if fieldValue == "false" { - ovnNodePortValue = 0 - } - case "ovn-bridge-mappings": - metricBridgeMappings.Reset() - metricBridgeMappings.WithLabelValues(fieldValue).Set(1) - } + nodePortField := openvSwitch.ExternalIDs["ovn-k8s-node-port"] + if nodePortField == "false" { + ovnNodePortValue = 0 } metricOvnNodePortEnabled.Set(float64(ovnNodePortValue)) + + bridgeMappingValue := openvSwitch.ExternalIDs["ovn-bridge-mappings"] + metricBridgeMappings.Reset() + metricBridgeMappings.WithLabelValues(bridgeMappingValue).Set(1) + return nil } -func ovnControllerConfigurationMetricsUpdater(stopChan <-chan struct{}) { - ticker := time.NewTicker(30 * time.Second) +func ovnControllerConfigurationMetricsUpdater(ovsDBClient libovsdbclient.Client, metricsScrapeInterval int, + stopChan <-chan struct{}) { + ticker := time.NewTicker(time.Duration(metricsScrapeInterval) * time.Second) defer ticker.Stop() for { select { case <-ticker.C: - if err := setOvnControllerConfigurationMetrics(); err != nil { + if err := setOvnControllerConfigurationMetrics(ovsDBClient); err != nil { klog.Errorf("Setting ovn controller config metrics failed: %s", err.Error()) } case <-stopChan: @@ -326,24 +316,26 @@ func ovnControllerConfigurationMetricsUpdater(stopChan <-chan struct{}) { } } -func getPortCount(portType string) float64 { +func getPortCount(ovsDBClient libovsdbclient.Client, portType string) float64 { var portCount float64 - stdout, stderr, err := util.RunOVSVsctl("--no-headings", "--data=bare", "--format=csv", - "--columns=name", "find", "interface", "type="+portType) + p := func(item *vswitchd.Interface) bool { + return item.Type == portType + } + + intfList, err := ovsops.FindInterfacesWithPredicate(ovsDBClient, p) if err != nil { - klog.Errorf("Failed to get %s interface count, stderr(%s): (%v)", portType, stderr, err) + klog.Errorf("Failed to get %s interface count: %v", portType, err) return 0 } - portNames := strings.Split(stdout, "\n") - switch portType { - case "patch": - for _, portName := range portNames { - if strings.Contains(portName, "br-int") { + if portType == "patch" { + for _, intf := range intfList { + if strings.Contains(intf.Name, "br-int") { portCount++ } + } - default: - portCount = float64(len(portNames)) + } else { + portCount = float64(len(intfList)) } return portCount @@ -407,7 +399,8 @@ func updateSBDBConnectionMetric(ovsAppctl ovsClient, retry int, retrySleep time. 
} } -func RegisterOvnControllerMetrics(stopChan <-chan struct{}) { +func RegisterOvnControllerMetrics(ovsDBClient libovsdbclient.Client, + metricsScrapeInterval int, stopChan <-chan struct{}) { getOvnControllerVersionInfo() ovnRegistry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ @@ -457,7 +450,7 @@ func RegisterOvnControllerMetrics(stopChan <-chan struct{}) { "bridge to physical OVS bridge and br-local OVS bridge.", }, func() float64 { - return getPortCount("patch") + return getPortCount(ovsDBClient, "patch") })) ovnRegistry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ @@ -467,7 +460,7 @@ func RegisterOvnControllerMetrics(stopChan <-chan struct{}) { Help: "Captures the number of geneve ports that are on br-int OVS bridge.", }, func() float64 { - return getPortCount("geneve") + return getPortCount(ovsDBClient, "geneve") })) // register ovn-controller configuration metrics @@ -487,7 +480,8 @@ func RegisterOvnControllerMetrics(stopChan <-chan struct{}) { registerStopwatchShowMetrics(ovnController, MetricOvnNamespace, MetricOvnSubsystemController) // ovn-controller configuration metrics updater - go ovnControllerConfigurationMetricsUpdater(stopChan) + go ovnControllerConfigurationMetricsUpdater(ovsDBClient, + metricsScrapeInterval, stopChan) // ovn-controller coverage show metrics updater go coverageShowMetricsUpdater(ovnController, stopChan) // ovn-controller stopwatch show metrics updater diff --git a/go-controller/pkg/metrics/ovnkube_controller.go b/go-controller/pkg/metrics/ovnkube_controller.go index 1e064e019c..f52fc46850 100644 --- a/go-controller/pkg/metrics/ovnkube_controller.go +++ b/go-controller/pkg/metrics/ovnkube_controller.go @@ -718,7 +718,7 @@ type item struct { type PodRecorder struct { records map[kapimtypes.UID]*record - queue workqueue.Interface + queue workqueue.TypedInterface[*item] } func NewPodRecorder() PodRecorder { @@ -736,7 +736,7 @@ func (pr *PodRecorder) Run(sbClient libovsdbclient.Client, stop <-chan struct{}) prometheus.MustRegister(metricPortBindingChassisLatency) }) - pr.queue = workqueue.New() + pr.queue = workqueue.NewTyped[*item]() pr.records = make(map[kapimtypes.UID]*record) sbClient.Cache().AddEventHandler(&cache.EventHandlerFuncs{ @@ -745,7 +745,7 @@ func (pr *PodRecorder) Run(sbClient libovsdbclient.Client, stop <-chan struct{}) return } if !pr.queueFull() { - pr.queue.Add(item{op: addPortBinding, old: model, timestamp: time.Now()}) + pr.queue.Add(&item{op: addPortBinding, old: model, timestamp: time.Now()}) } }, UpdateFunc: func(table string, old model.Model, new model.Model) { @@ -757,12 +757,12 @@ func (pr *PodRecorder) Run(sbClient libovsdbclient.Client, stop <-chan struct{}) // chassis assigned if oldRow.Chassis == nil && newRow.Chassis != nil { if !pr.queueFull() { - pr.queue.Add(item{op: updatePortBinding, old: old, new: new, timestamp: time.Now()}) + pr.queue.Add(&item{op: updatePortBinding, old: old, new: new, timestamp: time.Now()}) } // port binding up } else if oldRow.Up != nil && !*oldRow.Up && newRow.Up != nil && *newRow.Up { if !pr.queueFull() { - pr.queue.Add(item{op: updatePortBinding, old: old, new: new, timestamp: time.Now()}) + pr.queue.Add(&item{op: updatePortBinding, old: old, new: new, timestamp: time.Now()}) } } }, @@ -778,13 +778,13 @@ func (pr *PodRecorder) Run(sbClient libovsdbclient.Client, stop <-chan struct{}) func (pr *PodRecorder) AddPod(podUID kapimtypes.UID) { if pr.queue != nil && !pr.queueFull() { - pr.queue.Add(item{op: addPod, uid: podUID, timestamp: time.Now()}) + 
pr.queue.Add(&item{op: addPod, uid: podUID, timestamp: time.Now()}) } } func (pr *PodRecorder) CleanPod(podUID kapimtypes.UID) { if pr.queue != nil && !pr.queueFull() { - pr.queue.Add(item{op: cleanPod, uid: podUID}) + pr.queue.Add(&item{op: cleanPod, uid: podUID}) } } @@ -794,7 +794,7 @@ func (pr *PodRecorder) AddLSP(podUID kapimtypes.UID, netInfo util.NetInfo) { return } if pr.queue != nil && !pr.queueFull() { - pr.queue.Add(item{op: addLogicalSwitchPort, uid: podUID, timestamp: time.Now()}) + pr.queue.Add(&item{op: addLogicalSwitchPort, uid: podUID, timestamp: time.Now()}) } } @@ -873,12 +873,12 @@ func (pr *PodRecorder) processNextItem() bool { if term { return false } - pr.processItem(i.(item)) + pr.processItem(i) pr.queue.Done(i) return true } -func (pr *PodRecorder) processItem(i item) { +func (pr *PodRecorder) processItem(i *item) { switch i.op { case addPortBinding: pr.addPortBinding(i.old, i.timestamp) diff --git a/go-controller/pkg/metrics/ovnkube_controller_test.go b/go-controller/pkg/metrics/ovnkube_controller_test.go index 78c1672123..1596de88ec 100644 --- a/go-controller/pkg/metrics/ovnkube_controller_test.go +++ b/go-controller/pkg/metrics/ovnkube_controller_test.go @@ -15,7 +15,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" fakeclientgo "k8s.io/client-go/kubernetes/fake" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/metrics/ovs.go b/go-controller/pkg/metrics/ovs.go index 8d43278c94..35af9df820 100644 --- a/go-controller/pkg/metrics/ovs.go +++ b/go-controller/pkg/metrics/ovs.go @@ -5,12 +5,13 @@ package metrics import ( "fmt" - "strconv" "strings" "sync" "time" + libovsdbclient "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + ovsops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/ovs" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" @@ -271,17 +272,28 @@ var metricOvsTcPolicy = prometheus.NewGauge(prometheus.GaugeOpts{ type ovsClient func(args ...string) (string, string, error) -func getOvsVersionInfo() { - stdout, _, err := util.RunOvsVswitchdAppCtl("version") +func convertToFloat64(val *int) float64 { + var value float64 + if val != nil { + value = float64(*val) + } else { + value = 0 + } + return value +} + +func getOvsVersionInfo(ovsDBClient libovsdbclient.Client) { + openvSwitch, err := ovsops.GetOpenvSwitch(ovsDBClient) if err != nil { - klog.Errorf("Failed to get version information: %s", err.Error()) + klog.Errorf("Failed to get ovsdb openvswitch entry :(%v)", err) return } - if !strings.HasPrefix(stdout, "ovs-vswitchd (Open vSwitch)") { - klog.Errorf("Unexpected ovs-appctl version output: %s", stdout) + if openvSwitch.OVSVersion != nil { + ovsVersion = *openvSwitch.OVSVersion + } else { + klog.Errorf("Failed to get ovs version information") return } - ovsVersion = strings.Fields(stdout)[3] } // ovsDatapathLookupsMetrics obtains the ovs datapath @@ -414,8 +426,8 @@ func setOvsDatapathMetrics(ovsAppctl ovsClient, datapaths []string) (err error) } // ovsDatapathMetricsUpdater updates the ovs datapath metrics -func ovsDatapathMetricsUpdater(ovsAppctl ovsClient, tickPeriod time.Duration, stopChan <-chan struct{}) { - ticker := time.NewTicker(tickPeriod) +func ovsDatapathMetricsUpdater(ovsAppctl ovsClient, metricsScrapeInterval int, stopChan <-chan struct{}) { + ticker := 
time.NewTicker(time.Duration(metricsScrapeInterval) * time.Second) defer ticker.Stop() for { select { @@ -435,14 +447,14 @@ func ovsDatapathMetricsUpdater(ovsAppctl ovsClient, tickPeriod time.Duration, st } // ovsBridgeMetricsUpdater updates bridge related metrics -func ovsBridgeMetricsUpdater(ovsVsctl, ovsAppctl ovsClient, tickPeriod time.Duration, stopChan <-chan struct{}) { - ticker := time.NewTicker(tickPeriod) +func ovsBridgeMetricsUpdater(ovsDBClient libovsdbclient.Client, ovsAppctl ovsClient, metricsScrapeInterval int, stopChan <-chan struct{}) { + ticker := time.NewTicker(time.Duration(metricsScrapeInterval) * time.Second) defer ticker.Stop() var err error for { select { case <-ticker.C: - if err = updateOvsBridgeMetrics(ovsVsctl, ovsAppctl); err != nil { + if err = updateOvsBridgeMetrics(ovsDBClient, ovsAppctl); err != nil { klog.Errorf("Getting ovs bridge info failed: %s", err.Error()) } case <-stopChan: @@ -451,42 +463,22 @@ func ovsBridgeMetricsUpdater(ovsVsctl, ovsAppctl ovsClient, tickPeriod time.Dura } } -func updateOvsBridgeMetrics(ovsVsctl, ovsOfctl ovsClient) error { - stdout, stderr, err := ovsVsctl("--no-headings", "--data=bare", - "--format=csv", "--columns=name,port", "list", "Bridge") +func updateOvsBridgeMetrics(ovsDBClient libovsdbclient.Client, ovsOfctl ovsClient) error { + bridgeList, err := ovsops.ListBridges(ovsDBClient) if err != nil { - return fmt.Errorf("unable to update OVS bridge metrics due to failure to get output from"+ - " OVS client stderr(%s) :(%v)", stderr, err) - } - if stderr != "" { - return fmt.Errorf("unable to update OVS bridge metrics because OVS client returned error: %s", stderr) - } - if stdout == "" { - return fmt.Errorf("unable to update OVS bridge metrics because blank output received from OVS client") + return fmt.Errorf("failed to get ovsdb bridge table :(%v)", err) } - - //output will be of format :(br-local,12bc8575-8e1f-4583-b693-ea3b5bf09974 - // 5dc87c46-4d94-4469-9f7a-67ee1c8beb03 620cafe4-bfe5-4a23-8165-4ffc61e7de42) - var bridgeCount int - for _, kvPair := range strings.Split(stdout, "\n") { - if kvPair == "" { - continue - } - fields := strings.Split(kvPair, ",") - bridgeName := fields[0] - ports := strings.Fields(fields[1]) - if bridgeName != "" { - bridgeCount++ - metricOvsBridge.WithLabelValues(bridgeName).Set(1) - metricOvsBridgePortsTotal.WithLabelValues(bridgeName).Set(float64(len(ports))) - count, err := getOvsBridgeOpenFlowsCount(ovsOfctl, bridgeName) - if err != nil { - return err - } - metricOvsBridgeFlowsTotal.WithLabelValues(bridgeName).Set(count) + metricOvsBridgeTotal.Set(float64(len(bridgeList))) + for _, bridge := range bridgeList { + brName := bridge.Name + metricOvsBridge.WithLabelValues(brName).Set(1) + flowsCount, err := getOvsBridgeOpenFlowsCount(ovsOfctl, brName) + if err != nil { + return err } + metricOvsBridgeFlowsTotal.WithLabelValues(brName).Set(flowsCount) + metricOvsBridgePortsTotal.WithLabelValues(brName).Set(float64(len(bridge.Ports))) } - metricOvsBridgeTotal.Set(float64(bridgeCount)) return nil } @@ -516,14 +508,14 @@ func getOvsBridgeOpenFlowsCount(ovsOfctl ovsClient, bridgeName string) (float64, "flow_count field", bridgeName) } -func ovsInterfaceMetricsUpdater(ovsVsctl ovsClient, tickPeriod time.Duration, stopChan <-chan struct{}) { - ticker := time.NewTicker(tickPeriod) +func ovsInterfaceMetricsUpdater(ovsDBClient libovsdbclient.Client, metricsScrapeInterval int, stopChan <-chan struct{}) { + ticker := time.NewTicker(time.Duration(metricsScrapeInterval) * time.Second) defer ticker.Stop() 
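[Editor's note: every updater in this file now shares the same shape, a ticker derived from the configurable scrape interval in seconds plus a select that exits on stopChan. A minimal self-contained sketch of that loop, with hypothetical names:]

```go
package main

import (
	"fmt"
	"time"
)

// periodicUpdater sketches the common updater loop: the previously hardcoded
// 30-second ticker is replaced by a configurable interval, and the goroutine
// stops cleanly when stopChan is closed.
func periodicUpdater(update func() error, scrapeIntervalSec int, stopChan <-chan struct{}) {
	ticker := time.NewTicker(time.Duration(scrapeIntervalSec) * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if err := update(); err != nil {
				fmt.Printf("metrics update failed: %v\n", err)
			}
		case <-stopChan:
			return
		}
	}
}

func main() {
	stop := make(chan struct{})
	go periodicUpdater(func() error { fmt.Println("scrape"); return nil }, 1, stop)
	time.Sleep(2500 * time.Millisecond) // allow a couple of ticks
	close(stop)
}
```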
var err error for { select { case <-ticker.C: - if err = updateOvsInterfaceMetrics(ovsVsctl); err != nil { + if err = updateOvsInterfaceMetrics(ovsDBClient); err != nil { klog.Errorf("Updating OVS interface metrics failed: %s", err.Error()) } case <-stopChan: @@ -532,48 +524,29 @@ func ovsInterfaceMetricsUpdater(ovsVsctl ovsClient, tickPeriod time.Duration, st } } -// updateOvsInterfaceMetrics updates the ovs interface metrics obtained from ovs-vsctl --columns= list interface -func updateOvsInterfaceMetrics(ovsVsctl ovsClient) error { - var stdout, stderr string - var err error - - stdout, stderr, err = ovsVsctl("--no-headings", "--data=bare", - "--format=csv", "--columns=link_resets,statistics", "list", "Interface") +// updateOvsInterfaceMetrics updates the ovs interface metrics obtained from ovsdb +func updateOvsInterfaceMetrics(ovsDBClient libovsdbclient.Client) error { + interfaceList, err := ovsops.ListInterfaces(ovsDBClient) if err != nil { - return fmt.Errorf("failed to get output for ovs-vsctl list Interface "+ - "stderr(%s) :(%v)", stderr, err) + return fmt.Errorf("failed to get ovsdb interface table :(%v)", err) } - if stderr != "" { - return fmt.Errorf("failed to get OVS interface metrics due to stderr: %s", stderr) - } - if stdout == "" { - return fmt.Errorf("unable to update OVS interface metrics because blank output received from OVS client") + var interfaceStats = []string{ + "rx_dropped", + "rx_errors", + "tx_dropped", + "tx_errors", + "collisions", } - var linkReset, rxDropped, txDropped, rxErr, txErr, collisions, statValue, interfaceCount float64 - for _, kvPair := range strings.Split(stdout, "\n") { - if kvPair == "" { - continue - } - interfaceFieldValues := strings.Split(kvPair, ",") - if len(interfaceFieldValues) != 2 { - return fmt.Errorf("unexpected data format received while trying to get OVS interface metrics: %s", stdout) - } - statValue, err = strconv.ParseFloat(interfaceFieldValues[0], 64) - if err != nil { - return fmt.Errorf("expected string to contain an integer. Failed to get OVS interface metrics: %v", err) - } - linkReset += statValue - interfaceCount++ - // sum statistics - for _, field := range strings.Fields(interfaceFieldValues[1]) { - statsField := strings.Split(field, "=") - statName := strings.TrimSpace(statsField[0]) - statValue, err = strconv.ParseFloat(statsField[1], 64) - if err != nil { - return fmt.Errorf("expected string %q to contain an integer. 
Failed to get OVS interface metrics: %v", - interfaceFieldValues[1], err) - } + var linkReset, rxDropped, txDropped, rxErr, txErr, collisions, statValue float64 + for _, intf := range interfaceList { + linkReset += convertToFloat64(intf.LinkResets) + + for _, statName := range interfaceStats { + statValue = 0 + if value, ok := intf.Statistics[statName]; ok { + statValue = float64(value) + } switch statName { case "rx_dropped": rxDropped += statValue @@ -588,7 +561,7 @@ func updateOvsInterfaceMetrics(ovsVsctl ovsClient) error { } } } - metricOvsInterfaceTotal.Set(interfaceCount) + metricOvsInterfaceTotal.Set(float64(len(interfaceList))) metricOvsInterfaceResetsTotal.Set(linkReset) metricOvsInterfaceRxDroppedTotal.Set(rxDropped) metricOvsInterfaceTxDroppedTotal.Set(txDropped) @@ -630,8 +603,8 @@ func setOvsMemoryMetrics(ovsVswitchdAppctl ovsClient) (err error) { return nil } -func ovsMemoryMetricsUpdater(ovsVswitchdAppctl ovsClient, tickPeriod time.Duration, stopChan <-chan struct{}) { - ticker := time.NewTicker(tickPeriod) +func ovsMemoryMetricsUpdater(ovsVswitchdAppctl ovsClient, metricsScrapeInterval int, stopChan <-chan struct{}) { + ticker := time.NewTicker(time.Duration(metricsScrapeInterval) * time.Second) defer ticker.Stop() for { select { @@ -645,25 +618,13 @@ func ovsMemoryMetricsUpdater(ovsVswitchdAppctl ovsClient, tickPeriod time.Durati } } -// setOvsHwOffloadMetrics obatains the hw-offlaod, tc-policy -// ovs-vsctl list Open_vSwitch . and updates the corresponding metrics -func setOvsHwOffloadMetrics(ovsVsctl ovsClient) (err error) { - var stdout, stderr string - - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("recovering from panic while parsing the ovs-vsctl "+ - "list Open_vSwitch . output : %v", r) - } - }() - - stdout, stderr, err = ovsVsctl("--no-headings", "--data=bare", - "--columns=other_config", "list", "Open_vSwitch", ".") +// setOvsHwOffloadMetrics updates the hw-offload, tc-policy metrics +// obtained from Open_vSwitch table updates +func setOvsHwOffloadMetrics(ovsDBClient libovsdbclient.Client) (err error) { + openvSwitch, err := ovsops.GetOpenvSwitch(ovsDBClient) if err != nil { - return fmt.Errorf("failed to get output from ovs-vsctl list --columns=other_config"+ - "open_vSwitch . 
stderr(%s) : %v", stderr, err) + return fmt.Errorf("failed to get ovsdb openvswitch entry :(%v)", err) } - var hwOffloadValue = "false" var tcPolicyValue = "none" var tcPolicyMap = map[string]float64{ @@ -671,30 +632,31 @@ func setOvsHwOffloadMetrics(ovsVsctl ovsClient) (err error) { "skip_sw": 1, "skip_hw": 2, } - for _, kvPair := range strings.Fields(stdout) { - if strings.HasPrefix(kvPair, "hw-offload=") { - hwOffloadValue = strings.Split(kvPair, "=")[1] - } else if strings.HasPrefix(kvPair, "tc-policy=") { - tcPolicyValue = strings.Split(kvPair, "=")[1] - } - } + // set the hw-offload metric + if val, ok := openvSwitch.OtherConfig["hw-offload"]; ok { + hwOffloadValue = val + } if hwOffloadValue == "false" { metricOvsHwOffload.Set(0) } else { metricOvsHwOffload.Set(1) } + // set tc-policy metric + if val, ok := openvSwitch.OtherConfig["tc-policy"]; ok { + tcPolicyValue = val + } metricOvsTcPolicy.Set(tcPolicyMap[tcPolicyValue]) return nil } -func ovsHwOffloadMetricsUpdater(ovsVsctl ovsClient, tickPeriod time.Duration, stopChan <-chan struct{}) { - ticker := time.NewTicker(tickPeriod) +func ovsHwOffloadMetricsUpdater(ovsDBClient libovsdbclient.Client, metricsScrapeInterval int, stopChan <-chan struct{}) { + ticker := time.NewTicker(time.Duration(metricsScrapeInterval) * time.Second) defer ticker.Stop() for { select { case <-ticker.C: - if err := setOvsHwOffloadMetrics(ovsVsctl); err != nil { + if err := setOvsHwOffloadMetrics(ovsDBClient); err != nil { klog.Errorf("Setting ovs hardware offload metrics failed: %s", err.Error()) } case <-stopChan: @@ -868,17 +830,17 @@ var ovsVswitchdCoverageShowMetricsMap = map[string]*metricDetails{ } var registerOvsMetricsOnce sync.Once -func RegisterStandaloneOvsMetrics(stopChan <-chan struct{}) { - registerOvsMetrics(prometheus.DefaultRegisterer, stopChan) +func RegisterStandaloneOvsMetrics(ovsDBClient libovsdbclient.Client, metricsScrapeInterval int, stopChan <-chan struct{}) { + registerOvsMetrics(ovsDBClient, metricsScrapeInterval, prometheus.DefaultRegisterer, stopChan) } -func RegisterOvsMetricsWithOvnMetrics(stopChan <-chan struct{}) { - registerOvsMetrics(ovnRegistry, stopChan) +func RegisterOvsMetricsWithOvnMetrics(ovsDBClient libovsdbclient.Client, metricsScrapeInterval int, stopChan <-chan struct{}) { + registerOvsMetrics(ovsDBClient, metricsScrapeInterval, ovnRegistry, stopChan) } -func registerOvsMetrics(registry prometheus.Registerer, stopChan <-chan struct{}) { +func registerOvsMetrics(ovsDBClient libovsdbclient.Client, metricsScrapeInterval int, registry prometheus.Registerer, stopChan <-chan struct{}) { registerOvsMetricsOnce.Do(func() { - getOvsVersionInfo() + getOvsVersionInfo(ovsDBClient) registry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ Namespace: MetricOvsNamespace, @@ -941,15 +903,15 @@ func registerOvsMetrics(registry prometheus.Registerer, stopChan <-chan struct{} } // OVS datapath metrics updater - go ovsDatapathMetricsUpdater(util.RunOVSAppctl, 30*time.Second, stopChan) + go ovsDatapathMetricsUpdater(util.RunOVSAppctl, metricsScrapeInterval, stopChan) // OVS bridge metrics updater - go ovsBridgeMetricsUpdater(util.RunOVSVsctl, util.RunOVSOfctl, 30*time.Second, stopChan) + go ovsBridgeMetricsUpdater(ovsDBClient, util.RunOVSOfctl, metricsScrapeInterval, stopChan) // OVS interface metrics updater - go ovsInterfaceMetricsUpdater(util.RunOVSVsctl, 30*time.Second, stopChan) + go ovsInterfaceMetricsUpdater(ovsDBClient, metricsScrapeInterval, stopChan) // OVS memory metrics updater - go 
ovsMemoryMetricsUpdater(util.RunOvsVswitchdAppCtl, 30*time.Second, stopChan) + go ovsMemoryMetricsUpdater(util.RunOvsVswitchdAppCtl, metricsScrapeInterval, stopChan) // OVS hw Offload metrics updater - go ovsHwOffloadMetricsUpdater(util.RunOVSVsctl, 30*time.Second, stopChan) + go ovsHwOffloadMetricsUpdater(ovsDBClient, metricsScrapeInterval, stopChan) // OVS coverage/show metrics updater. go coverageShowMetricsUpdater(ovsVswitchd, stopChan) }) diff --git a/go-controller/pkg/metrics/ovs_test.go b/go-controller/pkg/metrics/ovs_test.go index 56ea743e42..da0795f890 100644 --- a/go-controller/pkg/metrics/ovs_test.go +++ b/go-controller/pkg/metrics/ovs_test.go @@ -2,11 +2,15 @@ package metrics import ( "fmt" + "sync/atomic" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics/mocks" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand" + libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/vswitchd" "github.com/prometheus/client_golang/prometheus" ) @@ -31,16 +35,70 @@ func (c *fakeOVSClient) FakeCall(args ...string) (string, string, error) { return output.stdout, output.stderr, output.err } +// buildNamedUUID builds an id that can be used as a named-uuid +func buildUUID() string { + namedUUIDPrefix := 'u' + namedUUIDCounter := cryptorand.Uint32() + return fmt.Sprintf("%c%010d", namedUUIDPrefix, atomic.AddUint32(&namedUUIDCounter, 1)) +} + const ( ovsAppctlDumpAggregateSampleOutput = "NXST_AGGREGATE reply (xid=0x4): packet_count=856244 byte_count=3464651294 flow_count=30" - ovsVsctlListBridgeOutput = "br-int,porta portb portc\nbr-ex,portd porte" - ovsVsctlListInterfaceOutput = "1,collisions=10 rx_bytes=0 rx_crc_err=0 rx_dropped=5 rx_errors=100 rx_frame_err=0 rx_missed_errors=0 rx_over_err=0 rx_packets=0 tx_bytes=0 tx_dropped=50 tx_errors=20 tx_packets=0\n1,rx_bytes=0 rx_packets=1000 tx_bytes=0 tx_packets=80\n0,collisions=10 rx_bytes=0 rx_crc_err=0 rx_dropped=5 rx_errors=100 rx_frame_err=0 rx_missed_errors=0 rx_over_err=0 rx_packets=0 tx_bytes=0 tx_dropped=50 tx_errors=20 tx_packets=0" ) var _ = ginkgo.Describe("OVS metrics", func() { var stopChan chan struct{} var resetsTotalMock, rxDroppedTotalMock, txDroppedTotalMock *mocks.GaugeMock var rxErrorsTotalMock, txErrorsTotalMock, collisionsTotalMock, bridgeTotalMock *mocks.GaugeMock + var hwOffloadMock, tcPolicyMock *mocks.GaugeMock + + linkResets := 1 + intf1 := vswitchd.Interface{Name: "porta", UUID: buildUUID()} + intf2 := vswitchd.Interface{Name: "portb", UUID: buildUUID()} + intf3 := vswitchd.Interface{Name: "portc", UUID: buildUUID()} + intf4 := vswitchd.Interface{Name: "portd", UUID: buildUUID()} + intf5 := vswitchd.Interface{Name: "porte", UUID: buildUUID()} + port1 := vswitchd.Port{Name: "porta", UUID: buildUUID()} + port2 := vswitchd.Port{Name: "portb", UUID: buildUUID()} + port3 := vswitchd.Port{Name: "portc", UUID: buildUUID()} + port4 := vswitchd.Port{Name: "portd", UUID: buildUUID()} + port5 := vswitchd.Port{Name: "porte", UUID: buildUUID()} + br1 := vswitchd.Bridge{Name: "br-int", UUID: buildUUID()} + br2 := vswitchd.Bridge{Name: "br-ex", UUID: buildUUID()} + + testDB := []libovsdbtest.TestData{ + &vswitchd.Interface{UUID: intf1.UUID, Name: intf1.Name, + LinkResets: &linkResets, Statistics: map[string]int{"collisions": 10, + "rx_bytes": 0, "rx_crc_err": 0, "rx_dropped": 5, "rx_errors": 100, + "rx_frame_err": 0, "rx_missed_errors": 0, "rx_over_err": 
0, "rx_packets": 0, + "tx_bytes": 0, "tx_dropped": 50, "tx_errors": 20, "tx_packets": 0}}, + &vswitchd.Interface{UUID: intf2.UUID, Name: intf2.Name, + LinkResets: &linkResets, Statistics: map[string]int{"rx_bytes": 0, + "rx_packets": 1000, "tx_bytes": 0, "tx_packets": 80}}, + &vswitchd.Interface{UUID: intf3.UUID, Name: intf3.Name, + Statistics: map[string]int{"collisions": 10, + "rx_bytes": 0, "rx_crc_err": 0, "rx_dropped": 5, "rx_errors": 100, + "rx_frame_err": 0, "rx_missed_errors": 0, "rx_over_err": 0, "rx_packets": 0, + "tx_bytes": 0, "tx_dropped": 50, "tx_errors": 20, "tx_packets": 0}}, + &vswitchd.Interface{UUID: intf4.UUID, Name: intf4.Name}, + &vswitchd.Interface{UUID: intf5.UUID, Name: intf5.Name}, + &vswitchd.Port{UUID: port1.UUID, Name: port1.Name, + Interfaces: []string{intf1.UUID}}, + &vswitchd.Port{UUID: port2.UUID, Name: port2.Name, + Interfaces: []string{intf2.UUID}}, + &vswitchd.Port{UUID: port3.UUID, Name: port3.Name, + Interfaces: []string{intf3.UUID}}, + &vswitchd.Port{UUID: port4.UUID, Name: port4.Name, + Interfaces: []string{intf4.UUID}}, + &vswitchd.Port{UUID: port5.UUID, Name: port5.Name, + Interfaces: []string{intf5.UUID}}, + &vswitchd.Bridge{UUID: br1.UUID, Name: br1.Name, Ports: []string{port1.UUID, port2.UUID, port3.UUID}}, + &vswitchd.Bridge{UUID: br2.UUID, Name: br2.Name, Ports: []string{port4.UUID, port5.UUID}}, + &vswitchd.OpenvSwitch{UUID: "root-ovs", Bridges: []string{br1.UUID, br2.UUID}}, + } + dbSetup := libovsdbtest.TestSetup{ + OVSData: testDB, + } ginkgo.BeforeEach(func() { // replace all the prom gauges with mocks @@ -53,16 +111,11 @@ var _ = ginkgo.Describe("OVS metrics", func() { close(stopChan) }) - ginkgo.Context("On update bridge metrics", func() { + ginkgo.Context("On update of bridge metrics", func() { ginkgo.It("sets bridge metrics when input valid", func() { - ovsVsctlOutput := []clientOutput{ - { - stdout: ovsVsctlListBridgeOutput, - stderr: "", - err: nil, - }, - } - ovsVsctl := NewFakeOVSClient(ovsVsctlOutput) + + ovsClient, libovsdbCleanup, err := libovsdbtest.NewOVSTestHarness(dbSetup) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ovsOfctlOutput := []clientOutput{ { stdout: ovsAppctlDumpAggregateSampleOutput, @@ -76,7 +129,7 @@ var _ = ginkgo.Describe("OVS metrics", func() { }, } ovsOfctl := NewFakeOVSClient(ovsOfctlOutput) - err := updateOvsBridgeMetrics(ovsVsctl.FakeCall, ovsOfctl.FakeCall) + err = updateOvsBridgeMetrics(ovsClient, ovsOfctl.FakeCall) gomega.Expect(err).To(gomega.BeNil()) // There is no easy way (that I can think of besides creating my own interface - none exist upstream) to // mock prometheus.gaugevec. 
@@ -89,59 +142,12 @@ var _ = ginkgo.Describe("OVS metrics", func() { metricOvsBridgeFlowsTotal.Collect(ovsBridgesCh) gomega.Expect(ovsBridgesCh).Should(gomega.HaveLen(6)) gomega.Expect(bridgeTotalMock.GetValue()).Should(gomega.BeNumerically("==", 2)) - }) - - ginkgo.It("returns error when OVS vsctl client returns an error", func() { - ovsVsctlOutput := []clientOutput{ - { - stdout: "", - stderr: "", - err: fmt.Errorf("could not connect to ovsdb"), - }, - } - ovsVsctl := NewFakeOVSClient(ovsVsctlOutput) - ovsAppctl := NewFakeOVSClient([]clientOutput{}) - err := updateOvsBridgeMetrics(ovsVsctl.FakeCall, ovsAppctl.FakeCall) - gomega.Expect(err).ToNot(gomega.BeNil()) - }) - - ginkgo.It("returns error when OVS vsctl client returns non-blank stderr", func() { - ovsVsctlOutput := []clientOutput{ - { - stdout: "", - stderr: "big bad error", - err: nil, - }, - } - ovsVsctl := NewFakeOVSClient(ovsVsctlOutput) - ovsAppctl := NewFakeOVSClient([]clientOutput{}) - err := updateOvsBridgeMetrics(ovsVsctl.FakeCall, ovsAppctl.FakeCall) - gomega.Expect(err).ToNot(gomega.BeNil()) - }) - - ginkgo.It("returns error when OVS vsctl client returns a blank output", func() { - ovsVsctlOutput := []clientOutput{ - { - stdout: "", - stderr: "", - err: nil, - }, - } - ovsVsctl := NewFakeOVSClient(ovsVsctlOutput) - ovsAppctl := NewFakeOVSClient([]clientOutput{}) - err := updateOvsBridgeMetrics(ovsVsctl.FakeCall, ovsAppctl.FakeCall) - gomega.Expect(err).ToNot(gomega.BeNil()) + libovsdbCleanup.Cleanup() }) ginkgo.It("returns error when OVS appctl client returns an error", func() { - ovsVsctlOutput := []clientOutput{ - { - stdout: ovsVsctlListBridgeOutput, - stderr: "", - err: nil, - }, - } - ovsVsctl := NewFakeOVSClient(ovsVsctlOutput) + ovsClient, libovsdbCleanup, err := libovsdbtest.NewOVSTestHarness(dbSetup) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ovsAppctlOutput := []clientOutput{ { stdout: "", @@ -150,19 +156,14 @@ var _ = ginkgo.Describe("OVS metrics", func() { }, } ovsAppctl := NewFakeOVSClient(ovsAppctlOutput) - err := updateOvsBridgeMetrics(ovsVsctl.FakeCall, ovsAppctl.FakeCall) + err = updateOvsBridgeMetrics(ovsClient, ovsAppctl.FakeCall) gomega.Expect(err).ToNot(gomega.BeNil()) + libovsdbCleanup.Cleanup() }) ginkgo.It("returns error when OVS appctl returns non-blank stderr", func() { - ovsVsctlOutput := []clientOutput{ - { - stdout: ovsVsctlListBridgeOutput, - stderr: "", - err: nil, - }, - } - ovsVsctl := NewFakeOVSClient(ovsVsctlOutput) + ovsClient, libovsdbCleanup, err := libovsdbtest.NewOVSTestHarness(dbSetup) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ovsAppctlOutput := []clientOutput{ { stdout: "", @@ -171,19 +172,14 @@ var _ = ginkgo.Describe("OVS metrics", func() { }, } ovsAppctl := NewFakeOVSClient(ovsAppctlOutput) - err := updateOvsBridgeMetrics(ovsVsctl.FakeCall, ovsAppctl.FakeCall) + err = updateOvsBridgeMetrics(ovsClient, ovsAppctl.FakeCall) gomega.Expect(err).ToNot(gomega.BeNil()) + libovsdbCleanup.Cleanup() }) ginkgo.It("returns error when OVS appctl client returns a blank output", func() { - ovsVsctlOutput := []clientOutput{ - { - stdout: ovsVsctlListBridgeOutput, - stderr: "", - err: nil, - }, - } - ovsVsctl := NewFakeOVSClient(ovsVsctlOutput) + ovsClient, libovsdbCleanup, err := libovsdbtest.NewOVSTestHarness(dbSetup) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ovsAppctlOutput := []clientOutput{ { stdout: "", @@ -192,8 +188,9 @@ var _ = ginkgo.Describe("OVS metrics", func() { }, } ovsAppctl := NewFakeOVSClient(ovsAppctlOutput) - err := 
updateOvsBridgeMetrics(ovsVsctl.FakeCall, ovsAppctl.FakeCall) + err = updateOvsBridgeMetrics(ovsClient, ovsAppctl.FakeCall) gomega.Expect(err).ToNot(gomega.BeNil()) + libovsdbCleanup.Cleanup() }) }) @@ -215,15 +212,9 @@ var _ = ginkgo.Describe("OVS metrics", func() { }) ginkgo.It("sets interface metrics when input is valid", func() { - ovsVsctlOutput := []clientOutput{ - { - stdout: ovsVsctlListInterfaceOutput, - stderr: "", - err: nil, - }, - } - ovsVsctl := NewFakeOVSClient(ovsVsctlOutput) - err := updateOvsInterfaceMetrics(ovsVsctl.FakeCall) + ovsClient, libovsdbCleanup, err := libovsdbtest.NewOVSTestHarness(dbSetup) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = updateOvsInterfaceMetrics(ovsClient) gomega.Expect(err).Should(gomega.BeNil()) gomega.Expect(resetsTotalMock.GetValue()).Should(gomega.BeNumerically("==", 2)) gomega.Expect(rxDroppedTotalMock.GetValue()).Should(gomega.BeNumerically("==", 10)) @@ -231,45 +222,47 @@ var _ = ginkgo.Describe("OVS metrics", func() { gomega.Expect(rxErrorsTotalMock.GetValue()).Should(gomega.BeNumerically("==", 200)) gomega.Expect(txErrorsTotalMock.GetValue()).Should(gomega.BeNumerically("==", 40)) gomega.Expect(collisionsTotalMock.GetValue()).Should(gomega.BeNumerically("==", 20)) + libovsdbCleanup.Cleanup() }) + }) - ginkgo.It("returns error when OVS vsctl client returns an error", func() { - ovsVsctlOutput := []clientOutput{ - { - stdout: "", - stderr: "", - err: fmt.Errorf("could not connect to ovsdb"), - }, - } - ovsVsctl := NewFakeOVSClient(ovsVsctlOutput) - err := updateOvsInterfaceMetrics(ovsVsctl.FakeCall) - gomega.Expect(err).ToNot(gomega.BeNil()) + ginkgo.Context("On update of OVS HwOffload metrics", func() { + ginkgo.BeforeEach(func() { + // replace all the prom gauges with mocks + hwOffloadMock = mocks.NewGaugeMock() + metricOvsHwOffload = hwOffloadMock + tcPolicyMock = mocks.NewGaugeMock() + metricOvsTcPolicy = tcPolicyMock }) - ginkgo.It("returns error when OVS vsctl client returns non-blank stderr", func() { - ovsVsctlOutput := []clientOutput{ - { - stdout: "", - stderr: "", - err: fmt.Errorf("could not connect to ovsdb"), - }, + ginkgo.It("sets Hw offload metrics when input is valid", func() { + testDB := []libovsdbtest.TestData{ + &vswitchd.OpenvSwitch{UUID: "root-ovs", OtherConfig: map[string]string{ + "hw-offload": "true", "tc-policy": "skip_sw"}}, } - ovsVsctl := NewFakeOVSClient(ovsVsctlOutput) - err := updateOvsInterfaceMetrics(ovsVsctl.FakeCall) - gomega.Expect(err).ToNot(gomega.BeNil()) + + dbSetup := libovsdbtest.TestSetup{ + OVSData: testDB, + } + ovsClient, libovsdbCleanup, err := libovsdbtest.NewOVSTestHarness(dbSetup) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = setOvsHwOffloadMetrics(ovsClient) + gomega.Expect(err).Should(gomega.BeNil()) + gomega.Expect(hwOffloadMock.GetValue()).Should(gomega.BeNumerically("==", 1)) + gomega.Expect(tcPolicyMock.GetValue()).Should(gomega.BeNumerically("==", 1)) + libovsdbCleanup.Cleanup() }) + ginkgo.It("returns error when openvswitch table is not found", func() { + testDB := []libovsdbtest.TestData{} - ginkgo.It("returns error when OVS vsctl client returns a blank output", func() { - ovsVsctlOutput := []clientOutput{ - { - stdout: "", - stderr: "", - err: nil, - }, + dbSetup := libovsdbtest.TestSetup{ + OVSData: testDB, } - ovsVsctl := NewFakeOVSClient(ovsVsctlOutput) - err := updateOvsInterfaceMetrics(ovsVsctl.FakeCall) + ovsClient, libovsdbCleanup, err := libovsdbtest.NewOVSTestHarness(dbSetup) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = 
setOvsHwOffloadMetrics(ovsClient) gomega.Expect(err).ToNot(gomega.BeNil()) + libovsdbCleanup.Cleanup() }) }) }) diff --git a/go-controller/pkg/nbdb/acl.go b/go-controller/pkg/nbdb/acl.go index 2cfde033cb..0c2840c178 100644 --- a/go-controller/pkg/nbdb/acl.go +++ b/go-controller/pkg/nbdb/acl.go @@ -42,6 +42,8 @@ type ACL struct { Name *string `ovsdb:"name"` Options map[string]string `ovsdb:"options"` Priority int `ovsdb:"priority"` + SampleEst *string `ovsdb:"sample_est"` + SampleNew *string `ovsdb:"sample_new"` Severity *ACLSeverity `ovsdb:"severity"` Tier int `ovsdb:"tier"` } @@ -178,6 +180,50 @@ func (a *ACL) GetPriority() int { return a.Priority } +func (a *ACL) GetSampleEst() *string { + return a.SampleEst +} + +func copyACLSampleEst(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalACLSampleEst(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ACL) GetSampleNew() *string { + return a.SampleNew +} + +func copyACLSampleNew(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalACLSampleNew(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + func (a *ACL) GetSeverity() *ACLSeverity { return a.Severity } @@ -210,6 +256,8 @@ func (a *ACL) DeepCopyInto(b *ACL) { b.Meter = copyACLMeter(a.Meter) b.Name = copyACLName(a.Name) b.Options = copyACLOptions(a.Options) + b.SampleEst = copyACLSampleEst(a.SampleEst) + b.SampleNew = copyACLSampleNew(a.SampleNew) b.Severity = copyACLSeverity(a.Severity) } @@ -240,6 +288,8 @@ func (a *ACL) Equals(b *ACL) bool { equalACLName(a.Name, b.Name) && equalACLOptions(a.Options, b.Options) && a.Priority == b.Priority && + equalACLSampleEst(a.SampleEst, b.SampleEst) && + equalACLSampleNew(a.SampleNew, b.SampleNew) && equalACLSeverity(a.Severity, b.Severity) && a.Tier == b.Tier } diff --git a/go-controller/pkg/nbdb/dhcp_relay.go b/go-controller/pkg/nbdb/dhcp_relay.go new file mode 100644 index 0000000000..f0e973ab78 --- /dev/null +++ b/go-controller/pkg/nbdb/dhcp_relay.go @@ -0,0 +1,145 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const DHCPRelayTable = "DHCP_Relay" + +// DHCPRelay defines an object in DHCP_Relay table +type DHCPRelay struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Name string `ovsdb:"name"` + Options map[string]string `ovsdb:"options"` + Servers *string `ovsdb:"servers"` +} + +func (a *DHCPRelay) GetUUID() string { + return a.UUID +} + +func (a *DHCPRelay) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyDHCPRelayExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDHCPRelayExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DHCPRelay) GetName() string { + return a.Name +} + +func (a *DHCPRelay) GetOptions() map[string]string { + return a.Options +} + +func copyDHCPRelayOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDHCPRelayOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DHCPRelay) GetServers() *string { + return a.Servers +} + +func copyDHCPRelayServers(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalDHCPRelayServers(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *DHCPRelay) DeepCopyInto(b *DHCPRelay) { + *b = *a + b.ExternalIDs = copyDHCPRelayExternalIDs(a.ExternalIDs) + b.Options = copyDHCPRelayOptions(a.Options) + b.Servers = copyDHCPRelayServers(a.Servers) +} + +func (a *DHCPRelay) DeepCopy() *DHCPRelay { + b := new(DHCPRelay) + a.DeepCopyInto(b) + return b +} + +func (a *DHCPRelay) CloneModelInto(b model.Model) { + c := b.(*DHCPRelay) + a.DeepCopyInto(c) +} + +func (a *DHCPRelay) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *DHCPRelay) Equals(b *DHCPRelay) bool { + return a.UUID == b.UUID && + equalDHCPRelayExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Name == b.Name && + equalDHCPRelayOptions(a.Options, b.Options) && + equalDHCPRelayServers(a.Servers, b.Servers) +} + +func (a *DHCPRelay) EqualsModel(b model.Model) bool { + c := b.(*DHCPRelay) + return a.Equals(c) +} + +var _ model.CloneableModel = &DHCPRelay{} +var _ model.ComparableModel = &DHCPRelay{} diff --git a/go-controller/pkg/nbdb/logical_router_port.go b/go-controller/pkg/nbdb/logical_router_port.go index dbe4ea8708..d39fe0db42 100644 --- a/go-controller/pkg/nbdb/logical_router_port.go +++ b/go-controller/pkg/nbdb/logical_router_port.go @@ -10,6 +10,7 @@ const LogicalRouterPortTable = "Logical_Router_Port" // LogicalRouterPort defines an object in Logical_Router_Port table type LogicalRouterPort struct { UUID string `ovsdb:"_uuid"` + DhcpRelay *string `ovsdb:"dhcp_relay"` Enabled *bool `ovsdb:"enabled"` ExternalIDs map[string]string `ovsdb:"external_ids"` GatewayChassis []string `ovsdb:"gateway_chassis"` @@ -28,6 +29,28 @@ func (a *LogicalRouterPort) GetUUID() string { return a.UUID } 
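+// GetDhcpRelay and the copy/equal helpers below follow the standard modelgen pattern for optional (pointer) columns: a nil-safe deep copy and pointer-aware equality.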
+func (a *LogicalRouterPort) GetDhcpRelay() *string { + return a.DhcpRelay +} + +func copyLogicalRouterPortDhcpRelay(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterPortDhcpRelay(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + func (a *LogicalRouterPort) GetEnabled() *bool { return a.Enabled } @@ -308,6 +331,7 @@ func equalLogicalRouterPortStatus(a, b map[string]string) bool { func (a *LogicalRouterPort) DeepCopyInto(b *LogicalRouterPort) { *b = *a + b.DhcpRelay = copyLogicalRouterPortDhcpRelay(a.DhcpRelay) b.Enabled = copyLogicalRouterPortEnabled(a.Enabled) b.ExternalIDs = copyLogicalRouterPortExternalIDs(a.ExternalIDs) b.GatewayChassis = copyLogicalRouterPortGatewayChassis(a.GatewayChassis) @@ -337,6 +361,7 @@ func (a *LogicalRouterPort) CloneModel() model.Model { func (a *LogicalRouterPort) Equals(b *LogicalRouterPort) bool { return a.UUID == b.UUID && + equalLogicalRouterPortDhcpRelay(a.DhcpRelay, b.DhcpRelay) && equalLogicalRouterPortEnabled(a.Enabled, b.Enabled) && equalLogicalRouterPortExternalIDs(a.ExternalIDs, b.ExternalIDs) && equalLogicalRouterPortGatewayChassis(a.GatewayChassis, b.GatewayChassis) && diff --git a/go-controller/pkg/nbdb/model.go b/go-controller/pkg/nbdb/model.go index d6809ef08a..daabac4530 100644 --- a/go-controller/pkg/nbdb/model.go +++ b/go-controller/pkg/nbdb/model.go @@ -20,6 +20,7 @@ func FullDatabaseModel() (model.ClientDBModel, error) { "Connection": &Connection{}, "Copp": &Copp{}, "DHCP_Options": &DHCPOptions{}, + "DHCP_Relay": &DHCPRelay{}, "DNS": &DNS{}, "Forwarding_Group": &ForwardingGroup{}, "Gateway_Chassis": &GatewayChassis{}, @@ -42,13 +43,16 @@ func FullDatabaseModel() (model.ClientDBModel, error) { "Port_Group": &PortGroup{}, "QoS": &QoS{}, "SSL": &SSL{}, + "Sample": &Sample{}, + "Sample_Collector": &SampleCollector{}, + "Sampling_App": &SamplingApp{}, "Static_MAC_Binding": &StaticMACBinding{}, }) } var schema = `{ "name": "OVN_Northbound", - "version": "7.3.0", + "version": "7.6.0", "tables": { "ACL": { "columns": { @@ -152,6 +156,28 @@ var schema = `{ } } }, + "sample_est": { + "type": { + "key": { + "type": "uuid", + "refTable": "Sample", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, + "sample_new": { + "type": { + "key": { + "type": "uuid", + "refTable": "Sample", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, "severity": { "type": { "key": { @@ -482,6 +508,47 @@ var schema = `{ }, "isRoot": true }, + "DHCP_Relay": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "servers": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + } + }, + "isRoot": true + }, "DNS": { "columns": { "external_ids": { @@ -1034,6 +1101,17 @@ var schema = `{ }, "Logical_Router_Port": { "columns": { + "dhcp_relay": { + "type": { + "key": { + "type": "uuid", + "refTable": "DHCP_Relay", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, "enabled": { "type": { "key": { @@ -1740,6 +1818,9 @@ var schema = `{ "max": 1 } }, + "match": { + "type": "string" + }, "options": { "type": { "key": { @@ -1752,6 +1833,15 @@ var schema = `{ "max": "unlimited" } }, + "priority": { + "type": { + "key": { + 
"type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + }, "type": { "type": { "key": { @@ -2007,6 +2097,135 @@ var schema = `{ } } }, + "Sample": { + "columns": { + "collectors": { + "type": { + "key": { + "type": "uuid", + "refTable": "Sample_Collector", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "metadata": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 4294967295 + }, + "min": 1, + "max": 1 + } + } + }, + "indexes": [ + [ + "metadata" + ] + ] + }, + "Sample_Collector": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "id": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 255 + } + } + }, + "name": { + "type": "string" + }, + "probability": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 65535 + } + } + }, + "set_id": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 4294967295 + } + } + } + }, + "indexes": [ + [ + "id" + ] + ], + "isRoot": true + }, + "Sampling_App": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "id": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 255 + } + } + }, + "type": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "drop", + "acl-new", + "acl-est" + ] + ] + } + } + } + }, + "indexes": [ + [ + "type" + ] + ], + "isRoot": true + }, "Static_MAC_Binding": { "columns": { "ip": { diff --git a/go-controller/pkg/nbdb/nat.go b/go-controller/pkg/nbdb/nat.go index 3286bbde2c..4bd1b7ed49 100644 --- a/go-controller/pkg/nbdb/nat.go +++ b/go-controller/pkg/nbdb/nat.go @@ -29,7 +29,9 @@ type NAT struct { GatewayPort *string `ovsdb:"gateway_port"` LogicalIP string `ovsdb:"logical_ip"` LogicalPort *string `ovsdb:"logical_port"` + Match string `ovsdb:"match"` Options map[string]string `ovsdb:"options"` + Priority int `ovsdb:"priority"` Type NATType `ovsdb:"type"` } @@ -189,6 +191,10 @@ func equalNATLogicalPort(a, b *string) bool { return *a == *b } +func (a *NAT) GetMatch() string { + return a.Match +} + func (a *NAT) GetOptions() map[string]string { return a.Options } @@ -219,6 +225,10 @@ func equalNATOptions(a, b map[string]string) bool { return true } +func (a *NAT) GetPriority() int { + return a.Priority +} + func (a *NAT) GetType() NATType { return a.Type } @@ -260,7 +270,9 @@ func (a *NAT) Equals(b *NAT) bool { equalNATGatewayPort(a.GatewayPort, b.GatewayPort) && a.LogicalIP == b.LogicalIP && equalNATLogicalPort(a.LogicalPort, b.LogicalPort) && + a.Match == b.Match && equalNATOptions(a.Options, b.Options) && + a.Priority == b.Priority && a.Type == b.Type } diff --git a/go-controller/pkg/nbdb/sample.go b/go-controller/pkg/nbdb/sample.go new file mode 100644 index 0000000000..639393a1e6 --- /dev/null +++ b/go-controller/pkg/nbdb/sample.go @@ -0,0 +1,85 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const SampleTable = "Sample" + +// Sample defines an object in Sample table +type Sample struct { + UUID string `ovsdb:"_uuid"` + Collectors []string `ovsdb:"collectors"` + Metadata int `ovsdb:"metadata"` +} + +func (a *Sample) GetUUID() string { + return a.UUID +} + +func (a *Sample) GetCollectors() []string { + return a.Collectors +} + +func copySampleCollectors(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalSampleCollectors(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Sample) GetMetadata() int { + return a.Metadata +} + +func (a *Sample) DeepCopyInto(b *Sample) { + *b = *a + b.Collectors = copySampleCollectors(a.Collectors) +} + +func (a *Sample) DeepCopy() *Sample { + b := new(Sample) + a.DeepCopyInto(b) + return b +} + +func (a *Sample) CloneModelInto(b model.Model) { + c := b.(*Sample) + a.DeepCopyInto(c) +} + +func (a *Sample) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Sample) Equals(b *Sample) bool { + return a.UUID == b.UUID && + equalSampleCollectors(a.Collectors, b.Collectors) && + a.Metadata == b.Metadata +} + +func (a *Sample) EqualsModel(b model.Model) bool { + c := b.(*Sample) + return a.Equals(c) +} + +var _ model.CloneableModel = &Sample{} +var _ model.ComparableModel = &Sample{} diff --git a/go-controller/pkg/nbdb/sample_collector.go b/go-controller/pkg/nbdb/sample_collector.go new file mode 100644 index 0000000000..50f0659040 --- /dev/null +++ b/go-controller/pkg/nbdb/sample_collector.go @@ -0,0 +1,105 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const SampleCollectorTable = "Sample_Collector" + +// SampleCollector defines an object in Sample_Collector table +type SampleCollector struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + ID int `ovsdb:"id"` + Name string `ovsdb:"name"` + Probability int `ovsdb:"probability"` + SetID int `ovsdb:"set_id"` +} + +func (a *SampleCollector) GetUUID() string { + return a.UUID +} + +func (a *SampleCollector) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copySampleCollectorExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalSampleCollectorExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *SampleCollector) GetID() int { + return a.ID +} + +func (a *SampleCollector) GetName() string { + return a.Name +} + +func (a *SampleCollector) GetProbability() int { + return a.Probability +} + +func (a *SampleCollector) GetSetID() int { + return a.SetID +} + +func (a *SampleCollector) DeepCopyInto(b *SampleCollector) { + *b = *a + b.ExternalIDs = copySampleCollectorExternalIDs(a.ExternalIDs) +} + +func (a *SampleCollector) DeepCopy() *SampleCollector { + b := new(SampleCollector) + a.DeepCopyInto(b) + return b +} + +func (a *SampleCollector) CloneModelInto(b model.Model) { + c := b.(*SampleCollector) + a.DeepCopyInto(c) +} + +func (a *SampleCollector) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *SampleCollector) Equals(b *SampleCollector) bool { + return a.UUID == b.UUID && + equalSampleCollectorExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.ID == b.ID && + a.Name == b.Name && + a.Probability == b.Probability && + a.SetID == b.SetID +} + +func (a *SampleCollector) EqualsModel(b model.Model) bool { + c := b.(*SampleCollector) + return a.Equals(c) +} + +var _ model.CloneableModel = &SampleCollector{} +var _ model.ComparableModel = &SampleCollector{} diff --git a/go-controller/pkg/nbdb/sampling_app.go b/go-controller/pkg/nbdb/sampling_app.go new file mode 100644 index 0000000000..a152b4237d --- /dev/null +++ b/go-controller/pkg/nbdb/sampling_app.go @@ -0,0 +1,103 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const SamplingAppTable = "Sampling_App" + +type ( + SamplingAppType = string +) + +var ( + SamplingAppTypeDrop SamplingAppType = "drop" + SamplingAppTypeACLNew SamplingAppType = "acl-new" + SamplingAppTypeACLEst SamplingAppType = "acl-est" +) + +// SamplingApp defines an object in Sampling_App table +type SamplingApp struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + ID int `ovsdb:"id"` + Type SamplingAppType `ovsdb:"type"` +} + +func (a *SamplingApp) GetUUID() string { + return a.UUID +} + +func (a *SamplingApp) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copySamplingAppExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalSamplingAppExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *SamplingApp) GetID() int { + return a.ID +} + +func (a *SamplingApp) GetType() SamplingAppType { + return a.Type +} + +func (a *SamplingApp) DeepCopyInto(b *SamplingApp) { + *b = *a + b.ExternalIDs = copySamplingAppExternalIDs(a.ExternalIDs) +} + +func (a *SamplingApp) DeepCopy() *SamplingApp { + b := new(SamplingApp) + a.DeepCopyInto(b) + return b +} + +func (a *SamplingApp) CloneModelInto(b model.Model) { + c := b.(*SamplingApp) + a.DeepCopyInto(c) +} + +func (a *SamplingApp) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *SamplingApp) Equals(b *SamplingApp) bool { + return a.UUID == b.UUID && + equalSamplingAppExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.ID == b.ID && + a.Type == b.Type +} + +func (a *SamplingApp) EqualsModel(b model.Model) bool { + c := b.(*SamplingApp) + return a.Equals(c) +} + +var _ model.CloneableModel = &SamplingApp{} +var _ model.ComparableModel = &SamplingApp{} diff --git a/go-controller/pkg/network-attach-def-controller/network_attach_def_controller.go b/go-controller/pkg/network-attach-def-controller/network_attach_def_controller.go index 0250c4e695..aabdbfe4bd 100644 --- a/go-controller/pkg/network-attach-def-controller/network_attach_def_controller.go +++ b/go-controller/pkg/network-attach-def-controller/network_attach_def_controller.go @@ -5,12 +5,18 @@ import ( "errors" "fmt" "reflect" + "sync" "time" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" + coreinformers "k8s.io/client-go/informers/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" @@ -18,7 +24,10 @@ import ( nadinformers "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1" nadlisters "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/controller" + userdefinednetworkinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/userdefinednetwork/v1" + userdefinednetworklister 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + utiludn "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/udn" ) var ErrNetworkControllerTopologyNotManaged = errors.New("no cluster network controller to manage topology") @@ -44,46 +53,65 @@ type NetworkControllerManager interface { type watchFactory interface { NADInformer() nadinformers.NetworkAttachmentDefinitionInformer + UserDefinedNetworkInformer() userdefinednetworkinformer.UserDefinedNetworkInformer + ClusterUserDefinedNetworkInformer() userdefinednetworkinformer.ClusterUserDefinedNetworkInformer + NamespaceInformer() coreinformers.NamespaceInformer +} + +type NADController interface { + Start() error + Stop() + GetActiveNetworkForNamespace(namespace string) (util.NetInfo, error) + GetNetwork(networkName string) (util.NetInfo, error) + // DoWithLock takes care of locking and unlocking while iterating over all role primary user defined networks. + DoWithLock(f func(network util.NetInfo) error) error + GetActiveNetworkNamespaces(networkName string) ([]string, error) } // NetAttachDefinitionController handles namespaced scoped NAD events and // manages cluster scoped networks defined in those NADs. NADs are mostly -// referred from pods to give them access to the network. Different NADs can +// referenced from pods to give them access to the network. Different NADs can // define the same network as long as those definitions are actually equal. // Unexpected situations are handled on best effort basis but improper NAD -// adminstration can lead to undefined behavior in referred from running pods. +// administration can lead to undefined behavior if referenced by running pods. 
type NetAttachDefinitionController struct { + sync.RWMutex name string netAttachDefLister nadlisters.NetworkAttachmentDefinitionLister + udnLister userdefinednetworklister.UserDefinedNetworkLister + cudnLister userdefinednetworklister.ClusterUserDefinedNetworkLister + namespaceLister corev1listers.NamespaceLister controller controller.Controller - + recorder record.EventRecorder // networkManager is used to manage the network controllers networkManager networkManager - networks map[string]util.NetInfo - // nads to network mapping nads map[string]string + + // primaryNADs holds a mapping of namespace to primary NAD names + primaryNADs map[string]string } func NewNetAttachDefinitionController( name string, ncm NetworkControllerManager, wf watchFactory, + recorder record.EventRecorder, ) (*NetAttachDefinitionController, error) { nadController := &NetAttachDefinitionController{ name: fmt.Sprintf("[%s NAD controller]", name), - networkManager: newNetworkManager(name, ncm), - networks: map[string]util.NetInfo{}, + recorder: recorder, nads: map[string]string{}, + primaryNADs: map[string]string{}, + networkManager: newNetworkManager(name, ncm), } config := &controller.ControllerConfig[nettypes.NetworkAttachmentDefinition]{ - RateLimiter: workqueue.DefaultControllerRateLimiter(), + RateLimiter: workqueue.DefaultTypedControllerRateLimiter[string](), Reconcile: nadController.sync, ObjNeedsUpdate: nadNeedsUpdate, - // this controller is not thread safe - Threadiness: 1, + Threadiness: 1, } nadInformer := wf.NADInformer() @@ -92,6 +120,17 @@ func NewNetAttachDefinitionController( config.Informer = nadInformer.Informer() config.Lister = nadController.netAttachDefLister.List } + if util.IsNetworkSegmentationSupportEnabled() { + if udnInformer := wf.UserDefinedNetworkInformer(); udnInformer != nil { + nadController.udnLister = udnInformer.Lister() + } + if cudnInformer := wf.ClusterUserDefinedNetworkInformer(); cudnInformer != nil { + nadController.cudnLister = cudnInformer.Lister() + } + if nsInformer := wf.NamespaceInformer(); nsInformer != nil { + nadController.namespaceLister = nsInformer.Lister() + } + } nadController.controller = controller.NewController( nadController.name, @@ -179,15 +218,32 @@ func (nadController *NetAttachDefinitionController) syncNAD(key string, nad *net var nadNetwork, oldNetwork, ensureNetwork util.NetInfo var err error + namespace, _, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return fmt.Errorf("%s: failed splitting key %s: %v", nadController.name, key, err) + } + if nad != nil { nadNetwork, err = util.ParseNADInfo(nad) if err != nil { + if nadController.recorder != nil { + nadController.recorder.Eventf(&corev1.ObjectReference{Kind: nad.Kind, Namespace: nad.Namespace, Name: nad.Name}, corev1.EventTypeWarning, + "InvalidConfig", "Failed to parse network config: %v", err.Error()) + } klog.Errorf("%s: failed parsing NAD %s: %v", nadController.name, key, err) return nil } nadNetworkName = nadNetwork.GetNetworkName() } + nadController.Lock() + defer nadController.Unlock() + // We can only have one primary NAD per namespace + primaryNAD := nadController.primaryNADs[namespace] + if nadNetwork != nil && nadNetwork.IsPrimaryNetwork() && primaryNAD != "" && primaryNAD != key { + return fmt.Errorf("%s: NAD %s is primary for the namespace, NAD %s can't be primary", nadController.name, primaryNAD, key) + } + // As multiple NADs may define networks with the same name, these networks // should also have the same config to be considered compatible. 
If an // incompatible network change happens on NAD update, we can: @@ -197,16 +253,14 @@ func (nadController *NetAttachDefinitionController) syncNAD(key string, nad *net // the NAD refers to a different network than before if nadNetworkName != nadController.nads[key] { - oldNetwork = nadController.networks[nadController.nads[key]] + oldNetwork = nadController.networkManager.getNetwork(nadController.nads[key]) } - - currentNetwork := nadController.networks[nadNetworkName] + currentNetwork := nadController.networkManager.getNetwork(nadNetworkName) switch { case currentNetwork == nil: // the NAD refers to a new network, ensure it ensureNetwork = nadNetwork - nadController.networks[nadNetworkName] = ensureNetwork case currentNetwork.Equals(nadNetwork): // the NAD refers to an existing compatible network, ensure that // existing network holds a reference to this NAD @@ -235,7 +289,6 @@ func (nadController *NetAttachDefinitionController) syncNAD(key string, nad *net oldNetwork.DeleteNADs(key) if len(oldNetwork.GetNADs()) == 0 { nadController.networkManager.DeleteNetwork(oldNetworkName) - delete(nadController.networks, oldNetworkName) } else { nadController.networkManager.EnsureNetwork(oldNetwork) } @@ -244,6 +297,9 @@ func (nadController *NetAttachDefinitionController) syncNAD(key string, nad *net // this was a nad delete if ensureNetwork == nil { delete(nadController.nads, key) + if nadController.primaryNADs[namespace] == key { + delete(nadController.primaryNADs, namespace) + } return err } @@ -252,11 +308,22 @@ func (nadController *NetAttachDefinitionController) syncNAD(key string, nad *net return nil } - // ensure the network associated with the NAD + // ensure the network is associated with the NAD ensureNetwork.AddNADs(key) nadController.nads[key] = ensureNetwork.GetNetworkName() + // track primary NAD + switch { + case ensureNetwork.IsPrimaryNetwork(): + nadController.primaryNADs[namespace] = key + default: + if nadController.primaryNADs[namespace] == key { + delete(nadController.primaryNADs, namespace) + } + } + + // reconcile the network nadController.networkManager.EnsureNetwork(ensureNetwork) - return err + return nil } func nadNeedsUpdate(oldNAD, newNAD *nettypes.NetworkAttachmentDefinition) bool { @@ -272,3 +339,133 @@ func nadNeedsUpdate(oldNAD, newNAD *nettypes.NetworkAttachmentDefinition) bool { return !reflect.DeepEqual(oldNAD.Spec, newNAD.Spec) } + +func (nadController *NetAttachDefinitionController) GetActiveNetworkForNamespace(namespace string) (util.NetInfo, error) { + if !util.IsNetworkSegmentationSupportEnabled() { + return &util.DefaultNetInfo{}, nil + } + nadController.RLock() + defer nadController.RUnlock() + primaryNAD := nadController.primaryNADs[namespace] + if primaryNAD != "" { + // we have a primary NAD, get the network + netName := nadController.nads[primaryNAD] + if netName == "" { + // this should never happen where we have a nad keyed in the primaryNADs + // map, but it doesn't exist in the nads map + panic("NAD Controller broken consistency between primary NADs and cached NADs") + } + network := nadController.networkManager.getNetwork(netName) + n := util.CopyNetInfo(network) + // update the returned netInfo copy to only have the primary NAD for this namespace + n.SetNADs(primaryNAD) + return n, nil + } + + // no primary network found, make sure we just haven't processed it yet and no UDN / CUDN exists + udns, err := nadController.udnLister.UserDefinedNetworks(namespace).List(labels.Everything()) + if err != nil { + return nil, fmt.Errorf("error getting user 
defined networks: %w", err) + } + for _, udn := range udns { + if utiludn.IsPrimaryNetwork(&udn.Spec) { + return nil, util.NewUnprocessedActiveNetworkError(namespace, udn.Name) + } + } + cudns, err := nadController.cudnLister.List(labels.Everything()) + if err != nil { + return nil, fmt.Errorf("failed to list CUDNs: %w", err) + } + for _, cudn := range cudns { + if !utiludn.IsPrimaryNetwork(&cudn.Spec.Network) { + continue + } + // check the subject namespace referred by the specified namespace-selector + cudnNamespaceSelector, err := metav1.LabelSelectorAsSelector(&cudn.Spec.NamespaceSelector) + if err != nil { + return nil, fmt.Errorf("failed to convert CUDN %q namespaceSelector: %w", cudn.Name, err) + } + selectedNamespaces, err := nadController.namespaceLister.List(cudnNamespaceSelector) + if err != nil { + return nil, fmt.Errorf("failed to list namespaces using selector %q: %w", cudnNamespaceSelector, err) + } + for _, ns := range selectedNamespaces { + if ns.Name == namespace { + return nil, util.NewUnprocessedActiveNetworkError(namespace, cudn.Name) + } + } + } + + return &util.DefaultNetInfo{}, nil +} + +func (nadController *NetAttachDefinitionController) GetNetwork(networkName string) (util.NetInfo, error) { + if !util.IsNetworkSegmentationSupportEnabled() { + return &util.DefaultNetInfo{}, nil + } + nadController.RLock() + defer nadController.RUnlock() + if networkName == "" { + return nil, fmt.Errorf("network must not be empty") + } + if networkName == "default" { + return &util.DefaultNetInfo{}, nil + } + network := nadController.networkManager.getNetwork(networkName) + if network == nil { + return nil, fmt.Errorf("failed to find network %q", networkName) + } + return util.CopyNetInfo(network), nil +} + +func (nadController *NetAttachDefinitionController) GetActiveNetworkNamespaces(networkName string) ([]string, error) { + if !util.IsNetworkSegmentationSupportEnabled() { + return []string{"default"}, nil + } + namespaces := make([]string, 0) + nadController.RLock() + defer nadController.RUnlock() + for namespaceName, primaryNAD := range nadController.primaryNADs { + nadNetworkName := nadController.nads[primaryNAD] + if nadNetworkName != networkName { + continue + } + namespaces = append(namespaces, namespaceName) + } + return namespaces, nil +} + +// DoWithLock iterates over all role primary user defined networks and executes the given fn with each network as input. +// An error will not block execution and instead all errors will be aggregated and returned when all networks are processed. +func (nadController *NetAttachDefinitionController) DoWithLock(f func(network util.NetInfo) error) error { + if !util.IsNetworkSegmentationSupportEnabled() { + defaultNetwork := &util.DefaultNetInfo{} + return f(defaultNetwork) + } + nadController.RLock() + defer nadController.RUnlock() + + var errs []error + for _, primaryNAD := range nadController.primaryNADs { + if primaryNAD == "" { + continue + } + netName := nadController.nads[primaryNAD] + if netName == "" { + // this should never happen where we have a nad keyed in the primaryNADs + // map, but it doesn't exist in the nads map + panic("NAD Controller broken consistency between primary NADs and cached NADs") + } + network := nadController.networkManager.getNetwork(netName) + n := util.CopyNetInfo(network) + // update the returned netInfo copy to only have the primary NAD for this namespace + n.SetNADs(primaryNAD) + if err := f(n); err != nil { + errs = append(errs, err) + } + } + if len(errs) > 0 { + return errors.Join(errs...) 
+ } + return nil +} diff --git a/go-controller/pkg/network-attach-def-controller/network_attach_def_controller_test.go b/go-controller/pkg/network-attach-def-controller/network_attach_def_controller_test.go index ac4e1f5c63..aff03de77a 100644 --- a/go-controller/pkg/network-attach-def-controller/network_attach_def_controller_test.go +++ b/go-controller/pkg/network-attach-def-controller/network_attach_def_controller_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "reflect" "sync" "testing" @@ -31,6 +32,7 @@ type testNetworkController struct { func (tnc *testNetworkController) Start(context.Context) error { tnc.tncm.Lock() defer tnc.tncm.Unlock() + fmt.Printf("starting network: %s\n", testNetworkKey(tnc)) tnc.tncm.started = append(tnc.tncm.started, testNetworkKey(tnc)) return nil } @@ -65,12 +67,17 @@ type testNetworkControllerManager struct { stopped []string cleaned []string + raiseErrorWhenCreatingController error + valid []util.BasicNetInfo } func (tncm *testNetworkControllerManager) NewNetworkController(netInfo util.NetInfo) (NetworkController, error) { tncm.Lock() defer tncm.Unlock() + if tncm.raiseErrorWhenCreatingController != nil { + return nil, tncm.raiseErrorWhenCreatingController + } t := &testNetworkController{ NetInfo: netInfo, tncm: tncm, @@ -85,33 +92,45 @@ func (tncm *testNetworkControllerManager) CleanupDeletedNetworks(validNetworks . } func TestNetAttachDefinitionController(t *testing.T) { - network_A := &ovncnitypes.NetConf{ + networkAPrimary := &ovncnitypes.NetConf{ Topology: types.Layer2Topology, NetConf: cnitypes.NetConf{ - Name: "network_A", + Name: "networkAPrimary", Type: "ovn-k8s-cni-overlay", }, - MTU: 1400, + Subnets: "10.1.130.0/24", + Role: types.NetworkRolePrimary, + MTU: 1400, } - network_A_incompatible := &ovncnitypes.NetConf{ + networkAIncompatible := &ovncnitypes.NetConf{ Topology: types.LocalnetTopology, NetConf: cnitypes.NetConf{ - Name: "network_A", + Name: "networkAPrimary", Type: "ovn-k8s-cni-overlay", }, MTU: 1400, } + networkASecondary := &ovncnitypes.NetConf{ + Topology: types.Layer2Topology, + NetConf: cnitypes.NetConf{ + Name: "networkAPrimary", + Type: "ovn-k8s-cni-overlay", + }, + Subnets: "10.1.130.0/24", + Role: types.NetworkRoleSecondary, + MTU: 1400, + } - network_B := &ovncnitypes.NetConf{ + networkBSecondary := &ovncnitypes.NetConf{ Topology: types.LocalnetTopology, NetConf: cnitypes.NetConf{ - Name: "network_B", + Name: "networkBSecondary", Type: "ovn-k8s-cni-overlay", }, MTU: 1400, } - network_Default := &ovncnitypes.NetConf{ + networkDefault := &ovncnitypes.NetConf{ Topology: types.Layer3Topology, NetConf: cnitypes.NetConf{ Name: "default", @@ -139,7 +158,7 @@ func TestNetAttachDefinitionController(t *testing.T) { args: []args{ { nad: "test/nad_1", - network: network_Default, + network: networkDefault, }, }, expected: []expected{}, @@ -149,12 +168,12 @@ func TestNetAttachDefinitionController(t *testing.T) { args: []args{ { nad: "test/nad_1", - network: network_A, + network: networkAPrimary, }, }, expected: []expected{ { - network: network_A, + network: networkAPrimary, nads: []string{"test/nad_1"}, }, }, @@ -164,7 +183,7 @@ func TestNetAttachDefinitionController(t *testing.T) { args: []args{ { nad: "test/nad_1", - network: network_A, + network: networkAPrimary, }, { nad: "test/nad_1", @@ -176,30 +195,72 @@ func TestNetAttachDefinitionController(t *testing.T) { args: []args{ { nad: "test/nad_1", - network: network_A, + network: networkASecondary, }, { nad: "test/nad_2", - network: network_A, + network: 
networkASecondary, }, }, expected: []expected{ { - network: network_A, + network: networkASecondary, nads: []string{"test/nad_1", "test/nad_2"}, }, }, }, + { + name: "Two Primary NADs added for same namespace", + args: []args{ + { + nad: "test/nad_1", + network: networkAPrimary, + }, + { + nad: "test/nad_2", + network: networkAPrimary, + wantErr: true, + }, + }, + expected: []expected{ + { + network: networkAPrimary, + nads: []string{"test/nad_1"}, + }, + }, + }, + { + name: "two Primary NADs added then one deleted", + args: []args{ + { + nad: "test/nad_1", + network: networkAPrimary, + }, + { + nad: "test2/nad_2", + network: networkAPrimary, + }, + { + nad: "test/nad_1", + }, + }, + expected: []expected{ + { + network: networkAPrimary, + nads: []string{"test2/nad_2"}, + }, + }, + }, { name: "two NADs added then one deleted", args: []args{ { nad: "test/nad_1", - network: network_A, + network: networkASecondary, }, { nad: "test/nad_2", - network: network_A, + network: networkASecondary, }, { nad: "test/nad_1", @@ -207,7 +268,7 @@ func TestNetAttachDefinitionController(t *testing.T) { }, expected: []expected{ { - network: network_A, + network: networkASecondary, nads: []string{"test/nad_2"}, }, }, @@ -217,11 +278,11 @@ func TestNetAttachDefinitionController(t *testing.T) { args: []args{ { nad: "test/nad_1", - network: network_A, + network: networkASecondary, }, { nad: "test/nad_2", - network: network_A, + network: networkASecondary, }, { nad: "test/nad_2", @@ -236,16 +297,16 @@ func TestNetAttachDefinitionController(t *testing.T) { args: []args{ { nad: "test/nad_1", - network: network_A, + network: networkAPrimary, }, { nad: "test/nad_1", - network: network_B, + network: networkBSecondary, }, }, expected: []expected{ { - network: network_B, + network: networkBSecondary, nads: []string{"test/nad_1"}, }, }, @@ -255,24 +316,24 @@ func TestNetAttachDefinitionController(t *testing.T) { args: []args{ { nad: "test/nad_1", - network: network_A, + network: networkASecondary, }, { nad: "test/nad_2", - network: network_A, + network: networkASecondary, }, { nad: "test/nad_1", - network: network_B, + network: networkBSecondary, }, }, expected: []expected{ { - network: network_A, + network: networkASecondary, nads: []string{"test/nad_2"}, }, { - network: network_B, + network: networkBSecondary, nads: []string{"test/nad_1"}, }, }, @@ -282,20 +343,20 @@ func TestNetAttachDefinitionController(t *testing.T) { args: []args{ { nad: "test/nad_1", - network: network_A, + network: networkAPrimary, }, { nad: "test/nad_2", - network: network_B, + network: networkBSecondary, }, { nad: "test/nad_1", - network: network_B, + network: networkBSecondary, }, }, expected: []expected{ { - network: network_B, + network: networkBSecondary, nads: []string{"test/nad_1", "test/nad_2"}, }, }, @@ -305,17 +366,17 @@ func TestNetAttachDefinitionController(t *testing.T) { args: []args{ { nad: "test/nad_1", - network: network_A, + network: networkAPrimary, }, { nad: "test/nad_2", - network: network_A_incompatible, + network: networkAIncompatible, wantErr: true, }, }, expected: []expected{ { - network: network_A, + network: networkAPrimary, nads: []string{"test/nad_1"}, }, }, @@ -325,16 +386,16 @@ func TestNetAttachDefinitionController(t *testing.T) { args: []args{ { nad: "test/nad_1", - network: network_A, + network: networkAPrimary, }, { nad: "test/nad_1", - network: network_A_incompatible, + network: networkAIncompatible, }, }, expected: []expected{ { - network: network_A_incompatible, + network: networkAIncompatible, nads: 
[]string{"test/nad_1"}, }, }, @@ -344,21 +405,21 @@ func TestNetAttachDefinitionController(t *testing.T) { args: []args{ { nad: "test/nad_1", - network: network_A, + network: networkASecondary, }, { nad: "test/nad_2", - network: network_A, + network: networkASecondary, }, { nad: "test/nad_1", - network: network_A_incompatible, + network: networkAIncompatible, wantErr: true, }, }, expected: []expected{ { - network: network_A, + network: networkASecondary, nads: []string{"test/nad_2"}, }, }, @@ -367,13 +428,17 @@ func TestNetAttachDefinitionController(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := gomega.NewWithT(t) + err := config.PrepareTestConfig() + g.Expect(err).ToNot(gomega.HaveOccurred()) + config.OVNKubernetesFeature.EnableNetworkSegmentation = true + config.OVNKubernetesFeature.EnableMultiNetwork = true tncm := &testNetworkControllerManager{ controllers: map[string]NetworkController{}, } nadController := &NetAttachDefinitionController{ - networks: map[string]util.NetInfo{}, nads: map[string]string{}, networkManager: newNetworkManager("", tncm), + primaryNADs: map[string]string{}, } g.Expect(nadController.networkManager.Start()).To(gomega.Succeed()) @@ -399,9 +464,6 @@ func TestNetAttachDefinitionController(t *testing.T) { } meetsExpectations := func(g gomega.Gomega) { - tncm.Lock() - defer tncm.Unlock() - var expectRunning []string for _, expected := range tt.expected { netInfo, err := util.NewNetInfo(expected.network) @@ -409,17 +471,30 @@ func TestNetAttachDefinitionController(t *testing.T) { name := netInfo.GetNetworkName() testNetworkKey := testNetworkKey(netInfo) - - // test that the controller have the expected config and NADs - g.Expect(tncm.controllers).To(gomega.HaveKey(testNetworkKey)) - g.Expect(tncm.controllers[testNetworkKey].Equals(netInfo)).To(gomega.BeTrue(), - fmt.Sprintf("matching network config for network %s", name)) - g.Expect(tncm.controllers[testNetworkKey].GetNADs()).To(gomega.ConsistOf(expected.nads), - fmt.Sprintf("matching NADs for network %s", name)) + func() { + tncm.Lock() + defer tncm.Unlock() + // test that the controller have the expected config and NADs + g.Expect(tncm.controllers).To(gomega.HaveKey(testNetworkKey)) + g.Expect(tncm.controllers[testNetworkKey].Equals(netInfo)).To(gomega.BeTrue(), + fmt.Sprintf("matching network config for network %s", name)) + g.Expect(tncm.controllers[testNetworkKey].GetNADs()).To(gomega.ConsistOf(expected.nads), + fmt.Sprintf("matching NADs for network %s", name)) + }() expectRunning = append(expectRunning, testNetworkKey) + if netInfo.IsPrimaryNetwork() && !netInfo.IsDefault() { + netInfo.SetNADs(expected.nads...) 
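+ // a namespace can have at most one primary NAD, so the first expected NAD identifies the namespace whose active network is checked below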
+ key := expected.nads[0] + namespace, _, err := cache.SplitMetaNamespaceKey(key) + g.Expect(err).ToNot(gomega.HaveOccurred()) + netInfoFound, err := nadController.GetActiveNetworkForNamespace(namespace) + g.Expect(err).ToNot(gomega.HaveOccurred()) + g.Expect(reflect.DeepEqual(netInfoFound, netInfo)).To(gomega.BeTrue()) + } } + tncm.Lock() + defer tncm.Unlock() expectStopped := sets.New(tncm.started...).Difference(sets.New(expectRunning...)).UnsortedList() - // test that the controllers are started, stopped and cleaned up as expected g.Expect(tncm.started).To(gomega.ContainElements(expectRunning), "started network controllers") g.Expect(tncm.stopped).To(gomega.ConsistOf(expectStopped), "stopped network controllers") @@ -434,13 +509,15 @@ func TestNetAttachDefinitionController(t *testing.T) { func TestSyncAll(t *testing.T) { network_A := &ovncnitypes.NetConf{ - Topology: types.Layer2Topology, + Topology: types.Layer3Topology, NetConf: cnitypes.NetConf{ Name: "network_A", Type: "ovn-k8s-cni-overlay", }, - MTU: 1400, + Role: types.NetworkRolePrimary, + MTU: 1400, } + network_A_Copy := *network_A network_B := &ovncnitypes.NetConf{ Topology: types.LocalnetTopology, NetConf: cnitypes.NetConf{ @@ -454,8 +531,9 @@ func TestSyncAll(t *testing.T) { netconf *ovncnitypes.NetConf } tests := []struct { - name string - testNADs []TestNAD + name string + testNADs []TestNAD + syncAllError error }{ { name: "multiple networks referenced by multiple nads", @@ -469,16 +547,20 @@ func TestSyncAll(t *testing.T) { netconf: network_B, }, { - name: "test/nad3", - netconf: network_A, + name: "test2/nad3", + netconf: &network_A_Copy, }, }, + syncAllError: ErrNetworkControllerTopologyNotManaged, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := gomega.NewWithT(t) + err := config.PrepareTestConfig() + g.Expect(err).ToNot(gomega.HaveOccurred()) + config.OVNKubernetesFeature.EnableNetworkSegmentation = true config.OVNKubernetesFeature.EnableMultiNetwork = true fakeClient := util.GetOVNClientset().GetOVNKubeControllerClientset() wf, err := factory.NewOVNKubeControllerWatchFactory(fakeClient) @@ -487,14 +569,20 @@ func TestSyncAll(t *testing.T) { tncm := &testNetworkControllerManager{ controllers: map[string]NetworkController{}, } + if tt.syncAllError != nil { + tncm.raiseErrorWhenCreatingController = tt.syncAllError + } + nadController, err := NewNetAttachDefinitionController( "SUT", tncm, wf, + nil, ) g.Expect(err).ToNot(gomega.HaveOccurred()) expectedNetworks := map[string]util.NetInfo{} + expectedPrimaryNetworks := map[string]util.BasicNetInfo{} for _, testNAD := range tt.testNADs { namespace, name, err := cache.SplitMetaNamespaceKey(testNAD.name) g.Expect(err).ToNot(gomega.HaveOccurred()) @@ -512,6 +600,9 @@ func TestSyncAll(t *testing.T) { netInfo, err = util.NewNetInfo(testNAD.netconf) g.Expect(err).ToNot(gomega.HaveOccurred()) expectedNetworks[testNAD.netconf.Name] = netInfo + if netInfo.IsPrimaryNetwork() && !netInfo.IsDefault() { + expectedPrimaryNetworks[netInfo.GetNetworkName()] = netInfo + } } } @@ -534,6 +625,11 @@ func TestSyncAll(t *testing.T) { g.Expect(actualNetworks).To(gomega.HaveKey(name)) g.Expect(actualNetworks[name].Equals(network)).To(gomega.BeTrue()) } + + actualPrimaryNetwork, err := nadController.GetActiveNetworkForNamespace("test") + g.Expect(err).ToNot(gomega.HaveOccurred()) + g.Expect(expectedPrimaryNetworks).To(gomega.HaveKey(actualPrimaryNetwork.GetNetworkName())) + 
g.Expect(expectedPrimaryNetworks[actualPrimaryNetwork.GetNetworkName()].Equals(actualPrimaryNetwork)).To(gomega.BeTrue()) }) } } diff --git a/go-controller/pkg/network-attach-def-controller/network_manager.go b/go-controller/pkg/network-attach-def-controller/network_manager.go index 4eef401c46..63763afd50 100644 --- a/go-controller/pkg/network-attach-def-controller/network_manager.go +++ b/go-controller/pkg/network-attach-def-controller/network_manager.go @@ -33,6 +33,8 @@ type networkManager interface { // Stop the controller Stop() + + getNetwork(string) util.NetInfo } func newNetworkManager(name string, ncm NetworkControllerManager) networkManager { @@ -45,8 +47,8 @@ func newNetworkManager(name string, ncm NetworkControllerManager) networkManager // this controller does not feed from an informer, networks are manually // added to the queue for processing config := &controller.ReconcilerConfig{ - RateLimiter: workqueue.DefaultControllerRateLimiter(), - Reconcile: nc.syncLocked, + RateLimiter: workqueue.DefaultTypedControllerRateLimiter[string](), + Reconcile: nc.sync, Threadiness: 1, } nc.controller = controller.NewReconciler( @@ -62,7 +64,7 @@ type networkControllerState struct { } type networkManagerImpl struct { - sync.Mutex + sync.RWMutex name string controller controller.Reconciler ncm NetworkControllerManager @@ -78,33 +80,73 @@ func (nm *networkManagerImpl) Start() error { func (nm *networkManagerImpl) Stop() { controller.Stop(nm.controller) - - for _, networkControllerState := range nm.networkControllers { + for _, networkControllerState := range nm.getAllNetworkStates() { networkControllerState.controller.Stop() } } func (nm *networkManagerImpl) EnsureNetwork(network util.NetInfo) { - nm.Lock() - defer nm.Unlock() - nm.networks[network.GetNetworkName()] = network + nm.setNetwork(network.GetNetworkName(), network) nm.controller.Reconcile(network.GetNetworkName()) } func (nm *networkManagerImpl) DeleteNetwork(network string) { + nm.setNetwork(network, nil) + nm.controller.Reconcile(network) +} + +func (nm *networkManagerImpl) setNetwork(network string, netInfo util.NetInfo) { nm.Lock() defer nm.Unlock() - delete(nm.networks, network) - nm.controller.Reconcile(network) + if netInfo == nil { + delete(nm.networks, network) + return + } + nm.networks[network] = netInfo +} + +func (nm *networkManagerImpl) getNetwork(network string) util.NetInfo { + nm.RLock() + defer nm.RUnlock() + return nm.networks[network] } -func (nm *networkManagerImpl) syncLocked(network string) error { +func (nm *networkManagerImpl) getAllNetworks() []util.BasicNetInfo { + nm.RLock() + defer nm.RUnlock() + networks := make([]util.BasicNetInfo, 0, len(nm.networks)) + for _, network := range nm.networks { + networks = append(networks, network) + } + return networks +} + +func (nm *networkManagerImpl) setNetworkState(network string, state *networkControllerState) { nm.Lock() defer nm.Unlock() - return nm.sync(network) + if state == nil { + delete(nm.networkControllers, network) + return + } + nm.networkControllers[network] = state +} + +func (nm *networkManagerImpl) getNetworkState(network string) *networkControllerState { + nm.RLock() + defer nm.RUnlock() + return nm.networkControllers[network] +} + +func (nm *networkManagerImpl) getAllNetworkStates() []*networkControllerState { + nm.RLock() + defer nm.RUnlock() + networkStates := make([]*networkControllerState, 0, len(nm.networks)) + for _, state := range nm.networkControllers { + networkStates = append(networkStates, state) + } + return networkStates } -// sync 
must be called with nm mutex locked func (nm *networkManagerImpl) sync(network string) error { startTime := time.Now() klog.V(5).Infof("%s: sync network %s", nm.name, network) @@ -112,8 +154,8 @@ func (nm *networkManagerImpl) sync(network string) error { klog.V(4).Infof("%s: finished syncing network %s, took %v", nm.name, network, time.Since(startTime)) }() - want := nm.networks[network] - have := nm.networkControllers[network] + want := nm.getNetwork(network) + have := nm.getNetworkState(network) // we will dispose of the old network if deletion is in progress or if // configuration changed @@ -128,7 +170,7 @@ func (nm *networkManagerImpl) sync(network string) error { if err != nil { return fmt.Errorf("%s: failed to cleanup network %s: %w", nm.name, network, err) } - delete(nm.networkControllers, network) + nm.setNetworkState(network, nil) } // no network needed so nothing to do @@ -136,7 +178,7 @@ func (nm *networkManagerImpl) sync(network string) error { return nil } - // this might just be an update of the network NADs + // we didn't dispose of current controller, so this might just be an update of the network NADs if have != nil && !dispose { have.controller.SetNADs(want.GetNADs()...) return nil @@ -152,22 +194,15 @@ func (nm *networkManagerImpl) sync(network string) error { if err != nil { return fmt.Errorf("%s: failed to start network %s: %w", nm.name, network, err) } - nm.networkControllers[network] = &networkControllerState{controller: nc} + nm.setNetworkState(network, &networkControllerState{controller: nc}) return nil } func (nm *networkManagerImpl) syncAll() error { - nm.Lock() - defer nm.Unlock() // as we sync upon start, consider networks that have not been ensured as // stale and clean them up - validNetworks := make([]util.BasicNetInfo, 0, len(nm.networks)) - networkNames := make([]string, 0, len(nm.networks)) - for name, network := range nm.networks { - validNetworks = append(validNetworks, network) - networkNames = append(networkNames, name) - } + validNetworks := nm.getAllNetworks() if err := nm.ncm.CleanupDeletedNetworks(validNetworks...); err != nil { return err } @@ -179,15 +214,15 @@ func (nm *networkManagerImpl) syncAll() error { // as stale. start := time.Now() klog.Infof("%s: syncing all networks", nm.name) - for _, networkName := range networkNames { - if err := nm.sync(networkName); errors.Is(err, ErrNetworkControllerTopologyNotManaged) { + for _, network := range validNetworks { + if err := nm.sync(network.GetNetworkName()); errors.Is(err, ErrNetworkControllerTopologyNotManaged) { klog.V(5).Infof( "ignoring network %q since %q does not manage it", - networkName, + network.GetNetworkName(), nm.name, ) } else if err != nil { - return fmt.Errorf("failed to sync network %s: %w", networkName, err) + return fmt.Errorf("failed to sync network %s: %w", network.GetNetworkName(), err) } } klog.Infof("%s: finished syncing all networks. 
Time taken: %s", nm.name, time.Since(start)) diff --git a/go-controller/pkg/network-controller-manager/network_controller_manager.go b/go-controller/pkg/network-controller-manager/network_controller_manager.go index a267b450ef..fc041db752 100644 --- a/go-controller/pkg/network-controller-manager/network_controller_manager.go +++ b/go-controller/pkg/network-controller-manager/network_controller_manager.go @@ -8,6 +8,7 @@ import ( "github.com/containernetworking/cni/pkg/types" libovsdbclient "github.com/ovn-org/libovsdb/client" + ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" @@ -17,7 +18,10 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" nad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn" + addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/udnenabledsvc" ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -46,13 +50,15 @@ type NetworkControllerManager struct { // Supports OVN Template Load Balancers? svcTemplateSupport bool - stopChan chan struct{} - wg *sync.WaitGroup - + stopChan chan struct{} + wg *sync.WaitGroup + portCache *ovn.PortCache defaultNetworkController nad.BaseNetworkController // net-attach-def controller handle net-attach-def and create/delete network controllers nadController *nad.NetAttachDefinitionController + // eIPController programs OVN to support EgressIP + eIPController *ovn.EgressIPController } func (cm *NetworkControllerManager) NewNetworkController(nInfo util.NetInfo) (nad.NetworkController, error) { @@ -63,11 +69,11 @@ func (cm *NetworkControllerManager) NewNetworkController(nInfo util.NetInfo) (na topoType := nInfo.TopologyType() switch topoType { case ovntypes.Layer3Topology: - return ovn.NewSecondaryLayer3NetworkController(cnci, nInfo), nil + return ovn.NewSecondaryLayer3NetworkController(cnci, nInfo, cm.nadController, cm.eIPController, cm.portCache) case ovntypes.Layer2Topology: - return ovn.NewSecondaryLayer2NetworkController(cnci, nInfo), nil + return ovn.NewSecondaryLayer2NetworkController(cnci, nInfo, cm.nadController) case ovntypes.LocalnetTopology: - return ovn.NewSecondaryLocalnetNetworkController(cnci, nInfo), nil + return ovn.NewSecondaryLocalnetNetworkController(cnci, nInfo, cm.nadController), nil } return nil, fmt.Errorf("topology type %s not supported", topoType) } @@ -81,11 +87,11 @@ func (cm *NetworkControllerManager) newDummyNetworkController(topoType, netName netInfo, _ := util.NewNetInfo(&ovncnitypes.NetConf{NetConf: types.NetConf{Name: netName}, Topology: topoType}) switch topoType { case ovntypes.Layer3Topology: - return ovn.NewSecondaryLayer3NetworkController(cnci, netInfo), nil + return ovn.NewSecondaryLayer3NetworkController(cnci, netInfo, cm.nadController, cm.eIPController, cm.portCache) case ovntypes.Layer2Topology: - return ovn.NewSecondaryLayer2NetworkController(cnci, netInfo), nil + return ovn.NewSecondaryLayer2NetworkController(cnci, netInfo, cm.nadController) case ovntypes.LocalnetTopology: - return ovn.NewSecondaryLocalnetNetworkController(cnci, netInfo), nil 
+ return ovn.NewSecondaryLocalnetNetworkController(cnci, netInfo, cm.nadController), nil } return nil, fmt.Errorf("topology type %s not supported", topoType) } @@ -93,9 +99,15 @@ func (cm *NetworkControllerManager) newDummyNetworkController(topoType, netName // Find all the OVN logical switches/routers for the secondary networks func findAllSecondaryNetworkLogicalEntities(nbClient libovsdbclient.Client) ([]*nbdb.LogicalSwitch, []*nbdb.LogicalRouter, error) { + + belongsToSecondaryNetwork := func(externalIDs map[string]string) bool { + _, hasNetworkExternalID := externalIDs[ovntypes.NetworkExternalID] + networkRole, hasNetworkRoleExternalID := externalIDs[ovntypes.NetworkRoleExternalID] + return hasNetworkExternalID && hasNetworkRoleExternalID && networkRole == ovntypes.NetworkRoleSecondary + } + p1 := func(item *nbdb.LogicalSwitch) bool { - _, ok := item.ExternalIDs[ovntypes.NetworkExternalID] - return ok + return belongsToSecondaryNetwork(item.ExternalIDs) } nodeSwitches, err := libovsdbops.FindLogicalSwitchesWithPredicate(nbClient, p1) if err != nil { @@ -103,8 +115,7 @@ func findAllSecondaryNetworkLogicalEntities(nbClient libovsdbclient.Client) ([]* return nil, nil, err } p2 := func(item *nbdb.LogicalRouter) bool { - _, ok := item.ExternalIDs[ovntypes.NetworkExternalID] - return ok + return belongsToSecondaryNetwork(item.ExternalIDs) } clusterRouters, err := libovsdbops.FindLogicalRoutersWithPredicate(nbClient, p2) if err != nil { @@ -173,7 +184,7 @@ func NewNetworkControllerManager(ovnClient *util.OVNClientset, wf *factory.Watch libovsdbOvnNBClient libovsdbclient.Client, libovsdbOvnSBClient libovsdbclient.Client, recorder record.EventRecorder, wg *sync.WaitGroup) (*NetworkControllerManager, error) { podRecorder := metrics.NewPodRecorder() - + stopCh := make(chan struct{}) cm := &NetworkControllerManager{ client: ovnClient.KubeClient, kube: &kube.KubeOVN{ @@ -187,20 +198,20 @@ func NewNetworkControllerManager(ovnClient *util.OVNClientset, wf *factory.Watch EgressQoSClient: ovnClient.EgressQoSClient, IPAMClaimsClient: ovnClient.IPAMClaimsClient, }, - stopChan: make(chan struct{}), - watchFactory: wf, - recorder: recorder, - nbClient: libovsdbOvnNBClient, - sbClient: libovsdbOvnSBClient, - podRecorder: &podRecorder, - + stopChan: stopCh, + watchFactory: wf, + recorder: recorder, + nbClient: libovsdbOvnNBClient, + sbClient: libovsdbOvnSBClient, + podRecorder: &podRecorder, + portCache: ovn.NewPortCache(stopCh), wg: wg, multicastSupport: config.EnableMulticast, } var err error if config.OVNKubernetesFeature.EnableMultiNetwork { - cm.nadController, err = nad.NewNetAttachDefinitionController("network-controller-manager", cm, wf) + cm.nadController, err = nad.NewNetAttachDefinitionController("network-controller-manager", cm, wf, nil) if err != nil { return nil, err } @@ -279,12 +290,13 @@ func (cm *NetworkControllerManager) newCommonNetworkControllerInfo() (*ovn.Commo } // initDefaultNetworkController creates the controller for default network -func (cm *NetworkControllerManager) initDefaultNetworkController() error { +func (cm *NetworkControllerManager) initDefaultNetworkController(nadController *nad.NetAttachDefinitionController, + observManager *observability.Manager) error { cnci, err := cm.newCommonNetworkControllerInfo() if err != nil { return fmt.Errorf("failed to create common network controller info: %w", err) } - defaultController, err := ovn.NewDefaultNetworkController(cnci) + defaultController, err := ovn.NewDefaultNetworkController(cnci, nadController, observManager, cm.portCache, 
cm.eIPController) if err != nil { return err } @@ -380,7 +392,46 @@ func (cm *NetworkControllerManager) Start(ctx context.Context) error { } cm.podRecorder.Run(cm.sbClient, cm.stopChan) - err = cm.initDefaultNetworkController() + if config.OVNKubernetesFeature.EnableEgressIP { + cm.eIPController = ovn.NewEIPController(cm.nbClient, cm.kube, cm.watchFactory, cm.recorder, cm.portCache, cm.nadController, + addressset.NewOvnAddressSetFactory(cm.nbClient, config.IPv4Mode, config.IPv6Mode), config.IPv4Mode, config.IPv6Mode, zone, ovn.DefaultNetworkControllerName) + // FIXME(martinkennelly): remove when EIP controller is fully extracted from DNC and started here. Ensure SyncLocalNodeZonesCache is re-enabled in EIP controller. + if err = cm.eIPController.SyncLocalNodeZonesCache(); err != nil { + klog.Warningf("Failed to sync EgressIP controller's local node cache: %v", err) + } + } + + // nadController is nil if multi-network is disabled + if cm.nadController != nil { + if err = cm.nadController.Start(); err != nil { + return fmt.Errorf("failed to start NAD controller: %v", err) + } + } + + var observabilityManager *observability.Manager + if config.OVNKubernetesFeature.EnableObservability { + observabilityManager = observability.NewManager(cm.nbClient) + if err = observabilityManager.Init(); err != nil { + return fmt.Errorf("failed to init observability manager: %w", err) + } + } else { + err = observability.Cleanup(cm.nbClient) + if err != nil { + klog.Warningf("Observability cleanup failed, expected if not all Samples were deleted yet: %v", err) + } + } + + if util.IsNetworkSegmentationSupportEnabled() { + addressSetFactory := addressset.NewOvnAddressSetFactory(cm.nbClient, config.IPv4Mode, config.IPv6Mode) + go func() { + if err := udnenabledsvc.NewController(cm.nbClient, addressSetFactory, cm.watchFactory.ServiceCoreInformer(), + config.Default.UDNAllowedDefaultServices).Run(cm.stopChan); err != nil { + klog.Errorf("UDN enabled service controller failed: %v", err) + } + }() + } + + err = cm.initDefaultNetworkController(cm.nadController, observabilityManager) if err != nil { return fmt.Errorf("failed to init default network controller: %v", err) } @@ -389,11 +440,6 @@ func (cm *NetworkControllerManager) Start(ctx context.Context) error { return fmt.Errorf("failed to start default network controller: %v", err) } - // nadController is nil if multi-network is disabled - if cm.nadController != nil { - return cm.nadController.Start() - } - return nil } diff --git a/go-controller/pkg/network-controller-manager/network_controller_manager_suite_test.go b/go-controller/pkg/network-controller-manager/network_controller_manager_suite_test.go index 7188f24e98..44ae4d83a6 100644 --- a/go-controller/pkg/network-controller-manager/network_controller_manager_suite_test.go +++ b/go-controller/pkg/network-controller-manager/network_controller_manager_suite_test.go @@ -3,7 +3,7 @@ package networkControllerManager import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" .
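Editor's note: the Start() hunk above applies a feature-gate lifecycle pattern to observability: when the gate is on, the manager must Init successfully or startup fails; when it is off, a best-effort Cleanup removes state left by a previous run and merely warns on failure. A minimal sketch of that shape, where Manager, Init, Cleanup and the gate flag are invented stand-ins rather than the real observability API:

package main

import (
	"errors"
	"log"
)

// Manager stands in for the real observability manager.
type Manager struct{ initialized bool }

func NewManager() *Manager { return &Manager{} }

// Init installs sampling state; a failure here aborts startup because the
// feature was explicitly requested.
func (m *Manager) Init() error { m.initialized = true; return nil }

// Cleanup removes leftover state from earlier runs; failures are tolerated
// since stale samples may still be referenced and can be retried next start.
func Cleanup() error { return errors.New("not all samples were deleted yet") }

func start(enableObservability bool) (*Manager, error) {
	if enableObservability {
		m := NewManager()
		if err := m.Init(); err != nil {
			return nil, err // hard failure: the gate is on
		}
		return m, nil
	}
	if err := Cleanup(); err != nil {
		log.Printf("observability cleanup failed, will retry on next start: %v", err)
	}
	return nil, nil // a nil manager simply disables sampling downstream
}

func main() {
	if _, err := start(false); err != nil {
		log.Fatal(err)
	}
}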
"github.com/onsi/gomega" ) diff --git a/go-controller/pkg/network-controller-manager/node_network_controller_manager.go b/go-controller/pkg/network-controller-manager/node_network_controller_manager.go index 6547886686..835bf0f382 100644 --- a/go-controller/pkg/network-controller-manager/node_network_controller_manager.go +++ b/go-controller/pkg/network-controller-manager/node_network_controller_manager.go @@ -36,7 +36,7 @@ type nodeNetworkControllerManager struct { wg *sync.WaitGroup recorder record.EventRecorder - defaultNodeNetworkController nad.BaseNetworkController + defaultNodeNetworkController *node.DefaultNodeNetworkController // net-attach-def controller handle net-attach-def and create/delete secondary controllers // nil in dpu-host mode @@ -54,12 +54,8 @@ func (ncm *nodeNetworkControllerManager) NewNetworkController(nInfo util.NetInfo topoType := nInfo.TopologyType() switch topoType { case ovntypes.Layer3Topology, ovntypes.Layer2Topology, ovntypes.LocalnetTopology: - dnnc, ok := ncm.defaultNodeNetworkController.(*node.DefaultNodeNetworkController) - if !ok { - return nil, fmt.Errorf("unable to deference default node network controller object") - } return node.NewSecondaryNodeNetworkController(ncm.newCommonNetworkControllerInfo(), - nInfo, ncm.vrfManager, ncm.ruleManager, dnnc.Gateway) + nInfo, ncm.vrfManager, ncm.ruleManager, ncm.defaultNodeNetworkController.Gateway) } return nil, fmt.Errorf("topology type %s not supported", topoType) } @@ -87,11 +83,11 @@ func (ncm *nodeNetworkControllerManager) CleanupDeletedNetworks(validNetworks .. func (ncm *nodeNetworkControllerManager) getNetworkID(network util.BasicNetInfo) (int, error) { nodes, err := ncm.watchFactory.GetNodes() if err != nil { - return util.InvalidNetworkID, err + return util.InvalidID, err } networkID, err := util.GetNetworkID(nodes, network) if err != nil { - return util.InvalidNetworkID, err + return util.InvalidID, err } return networkID, nil } @@ -105,8 +101,8 @@ func (ncm *nodeNetworkControllerManager) newCommonNetworkControllerInfo() *node. 
// (1) dpu mode is enabled when secondary networks feature is enabled // (2) primary user defined networks is enabled (all modes) func isNodeNADControllerRequired() bool { - return ((config.OVNKubernetesFeature.EnableMultiNetwork && config.OvnKubeNode.Mode == ovntypes.NodeModeDPU) || - util.IsNetworkSegmentationSupportEnabled()) + return (config.OVNKubernetesFeature.EnableMultiNetwork && config.OvnKubeNode.Mode == ovntypes.NodeModeDPU) || + util.IsNetworkSegmentationSupportEnabled() } // NewNodeNetworkControllerManager creates a new OVN controller manager to manage all the controller for all networks @@ -127,21 +123,22 @@ func NewNodeNetworkControllerManager(ovnClient *util.OVNClientset, wf factory.No // need to start NAD controller on node side for programming gateway pieces for UDNs var err error if isNodeNADControllerRequired() { - ncm.nadController, err = nad.NewNetAttachDefinitionController("node-network-controller-manager", ncm, wf) + ncm.nadController, err = nad.NewNetAttachDefinitionController("node-network-controller-manager", ncm, wf, nil) + if err != nil { + return nil, err + } } if util.IsNetworkSegmentationSupportEnabled() { ncm.vrfManager = vrfmanager.NewController(ncm.routeManager) ncm.ruleManager = iprulemanager.NewController(config.IPv4Mode, config.IPv6Mode) } - if err != nil { - return nil, err - } return ncm, nil } // initDefaultNodeNetworkController creates the controller for default network func (ncm *nodeNetworkControllerManager) initDefaultNodeNetworkController() error { - defaultNodeNetworkController, err := node.NewDefaultNodeNetworkController(ncm.newCommonNetworkControllerInfo()) + defaultNodeNetworkController, err := node.NewDefaultNodeNetworkController(ncm.newCommonNetworkControllerInfo(), + ncm.nadController) if err != nil { return err } @@ -171,6 +168,7 @@ func (ncm *nodeNetworkControllerManager) Start(ctx context.Context) (err error) // make sure we clean up after ourselves on failure defer func() { if err != nil { + klog.Errorf("Stopping node network controller manager, err=%v", err) ncm.Stop() } }() @@ -194,18 +192,23 @@ func (ncm *nodeNetworkControllerManager) Start(ctx context.Context) (err error) if err != nil { return fmt.Errorf("failed to init default node network controller: %v", err) } - err = ncm.defaultNodeNetworkController.Start(ctx) + err = ncm.defaultNodeNetworkController.PreStart(ctx) // partial gateway init + OpenFlow Manager if err != nil { return fmt.Errorf("failed to start default node network controller: %v", err) } - // nadController is nil if multi-network is disabled if ncm.nadController != nil { err = ncm.nadController.Start() if err != nil { return fmt.Errorf("failed to start NAD controller: %w", err) } } + + err = ncm.defaultNodeNetworkController.Start(ctx) + if err != nil { + return fmt.Errorf("failed to start default node network controller: %v", err) + } + if ncm.vrfManager != nil { // Let's create VRF manager that will manage VRFs for all UDNs err = ncm.vrfManager.Run(ncm.stopChan, ncm.wg) diff --git a/go-controller/pkg/network-controller-manager/node_network_controller_manager_test.go b/go-controller/pkg/network-controller-manager/node_network_controller_manager_test.go index 99ddaa530c..e63e698bc1 100644 --- a/go-controller/pkg/network-controller-manager/node_network_controller_manager_test.go +++ b/go-controller/pkg/network-controller-manager/node_network_controller_manager_test.go @@ -6,7 +6,7 @@ import ( "github.com/containernetworking/plugins/pkg/ns" "github.com/containernetworking/plugins/pkg/testutils" - . 
"github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" factoryMocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory/mocks" @@ -129,6 +129,7 @@ var _ = Describe("Healthcheck tests", func() { BeforeEach(func() { // setup kube output + factoryMock.On("NADInformer").Return(nil) ncm, err = NewNodeNetworkControllerManager(fakeClient, &factoryMock, nodeName, &sync.WaitGroup{}, nil, routeManager) Expect(err).NotTo(HaveOccurred()) factoryMock.On("GetPods", "").Return(podList, nil) @@ -144,7 +145,6 @@ var _ = Describe("Healthcheck tests", func() { "stale-pod-ifc,sandbox=123abcfaa iface-id=stale-ns_stale-pod iface-id-ver=pod-stale-uuid-3 vf-netdev-name=blah\n", Err: nil, }) - // mock calls to remove only stale-port execMock.AddFakeCmd(&ovntest.ExpectedCmd{ Cmd: genDeleteStaleRepPortCmd("stale-pod-ifc"), @@ -204,7 +204,7 @@ var _ = Describe("Healthcheck tests", func() { Expect(testutils.UnmountNS(testNS)).To(Succeed()) }) - It("check vrf devices are cleaned for deleted networks", func() { + ovntest.OnSupportedPlatformsIt("check vrf devices are cleaned for deleted networks", func() { config.OVNKubernetesFeature.EnableNetworkSegmentation = true config.OVNKubernetesFeature.EnableMultiNetwork = true @@ -223,6 +223,9 @@ var _ = Describe("Healthcheck tests", func() { factoryMock.On("GetNode", nodeName).Return(nodeList[0], nil) factoryMock.On("GetNodes").Return(nodeList, nil) factoryMock.On("NADInformer").Return(nil) + factoryMock.On("UserDefinedNetworkInformer").Return(nil) + factoryMock.On("ClusterUserDefinedNetworkInformer").Return(nil) + factoryMock.On("NamespaceInformer").Return(nil) ncm, err := NewNodeNetworkControllerManager(fakeClient, &factoryMock, nodeName, &sync.WaitGroup{}, nil, routeManager) Expect(err).NotTo(HaveOccurred()) diff --git a/go-controller/pkg/node/base_node_network_controller_dpu_test.go b/go-controller/pkg/node/base_node_network_controller_dpu_test.go index 64f3bcd34f..2f86122feb 100644 --- a/go-controller/pkg/node/base_node_network_controller_dpu_test.go +++ b/go-controller/pkg/node/base_node_network_controller_dpu_test.go @@ -3,13 +3,14 @@ package node import ( "fmt" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/stretchr/testify/mock" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" adminpolicybasedrouteclient "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/fake" factorymocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory/mocks" kubemocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube/mocks" @@ -98,6 +99,7 @@ var _ = Describe("Node DPU tests", func() { origNetlinkOps := util.GetNetLinkOps() BeforeEach(func() { + config.PrepareTestConfig() sriovnetOpsMock = utilMocks.SriovnetOps{} netlinkOpsMock = utilMocks.NetLinkOps{} execMock = ovntest.NewFakeExec() @@ -115,7 +117,7 @@ var _ = Describe("Node DPU tests", func() { apbExternalRouteClient := adminpolicybasedrouteclient.NewSimpleClientset() factoryMock = factorymocks.NodeWatchFactory{} cnnci := newCommonNodeNetworkControllerInfo(nil, &kubeMock, apbExternalRouteClient, &factoryMock, nil, "", routeManager) - dnnc = newDefaultNodeNetworkController(cnnci, nil, nil, routeManager) + dnnc = newDefaultNodeNetworkController(cnnci, nil, nil, routeManager, nil) podInformer = coreinformermocks.PodInformer{} podNamespaceLister = v1mocks.PodNamespaceLister{} diff --git a/go-controller/pkg/node/controllers/egressip/egressip.go b/go-controller/pkg/node/controllers/egressip/egressip.go index a21eac49ab..60767d674c 100644 --- a/go-controller/pkg/node/controllers/egressip/egressip.go +++ b/go-controller/pkg/node/controllers/egressip/egressip.go @@ -106,19 +106,24 @@ type referencedObjects struct { eIPPods sets.Set[ktypes.NamespacedName] } +// getActiveNetworkForNamespaceFn returns a NetInfo which contains NADs which refer to a network in addition to the basic +// network information. +type getActiveNetworkForNamespaceFn func(namespace string) (util.NetInfo, error) + // Controller implement Egress IP for secondary host networks type Controller struct { eIPLister egressiplisters.EgressIPLister eIPInformer cache.SharedIndexInformer - eIPQueue workqueue.RateLimitingInterface + eIPQueue workqueue.TypedRateLimitingInterface[string] nodeLister corelisters.NodeLister namespaceLister corelisters.NamespaceLister namespaceInformer cache.SharedIndexInformer - namespaceQueue workqueue.RateLimitingInterface + namespaceQueue workqueue.TypedRateLimitingInterface[*corev1.Namespace] - podLister corelisters.PodLister - podInformer cache.SharedIndexInformer - podQueue workqueue.RateLimitingInterface + podLister corelisters.PodLister + podInformer cache.SharedIndexInformer + podQueue workqueue.TypedRateLimitingInterface[*corev1.Pod] + getActiveNetworkForNamespace getActiveNetworkForNamespaceFn // cache is a cache of configuration states for EIPs, key is EgressIP Name. 
cache *syncmap.SyncMap[*state] @@ -140,40 +145,42 @@ type Controller struct { v6 bool } -func NewController(k kube.Interface, eIPInformer egressipinformer.EgressIPInformer, nodeInformer cache.SharedIndexInformer, namespaceInformer coreinformers.NamespaceInformer, - podInformer coreinformers.PodInformer, routeManager *routemanager.Controller, v4, v6 bool, nodeName string, linkManager *linkmanager.Controller) (*Controller, error) { +func NewController(k kube.Interface, eIPInformer egressipinformer.EgressIPInformer, nodeInformer cache.SharedIndexInformer, + namespaceInformer coreinformers.NamespaceInformer, podInformer coreinformers.PodInformer, getActiveNetworkForNamespaceFn getActiveNetworkForNamespaceFn, + routeManager *routemanager.Controller, v4, v6 bool, nodeName string, linkManager *linkmanager.Controller) (*Controller, error) { c := &Controller{ eIPLister: eIPInformer.Lister(), eIPInformer: eIPInformer.Informer(), - eIPQueue: workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(time.Second, 5*time.Second, 5), - "eipeip", + eIPQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[string](time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "eipeip"}, ), nodeLister: corelisters.NewNodeLister(nodeInformer.GetIndexer()), namespaceLister: namespaceInformer.Lister(), namespaceInformer: namespaceInformer.Informer(), - namespaceQueue: workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(time.Second, 5*time.Second, 5), - "eipnamespace", + namespaceQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[*corev1.Namespace](time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[*corev1.Namespace]{Name: "eipnamespace"}, ), podLister: podInformer.Lister(), podInformer: podInformer.Informer(), - podQueue: workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(time.Second, 5*time.Second, 5), - "eippods", + podQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[*corev1.Pod](time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[*corev1.Pod]{Name: "eippods"}, ), - cache: syncmap.NewSyncMap[*state](), - referencedObjectsLock: sync.RWMutex{}, - referencedObjects: map[string]*referencedObjects{}, - routeManager: routeManager, - linkManager: linkManager, - ruleManager: iprulemanager.NewController(v4, v6), - iptablesManager: iptables.NewController(), - kube: k, - nodeName: nodeName, - v4: v4, - v6: v6, + getActiveNetworkForNamespace: getActiveNetworkForNamespaceFn, + cache: syncmap.NewSyncMap[*state](), + referencedObjectsLock: sync.RWMutex{}, + referencedObjects: map[string]*referencedObjects{}, + routeManager: routeManager, + linkManager: linkManager, + ruleManager: iprulemanager.NewController(v4, v6), + iptablesManager: iptables.NewController(), + kube: k, + nodeName: nodeName, + v4: v4, + v6: v6, } return c, nil } @@ -430,7 +437,7 @@ func (c *Controller) processNextEIPWorkItem(wg *sync.WaitGroup) bool { } defer c.eIPQueue.Done(key) klog.V(4).Infof("Processing Egress IP %s", key) - if err := c.syncEIP(key.(string)); err != nil { + if err := c.syncEIP(key); err != nil { if c.eIPQueue.NumRequeues(key) < maxRetries { klog.V(4).Infof("Error found while processing Egress IP %s: %v", key, err) c.eIPQueue.AddRateLimited(key) @@ -554,6 +561,14 @@ func (c *Controller) processEIP(eip *eipv1.EgressIP) (*eIPConfig, sets.Set[strin } isEIPV6 := 
utilnet.IsIPv6(eIPNet.IP) for _, namespace := range namespaces { + netInfo, err := c.getActiveNetworkForNamespace(namespace.Name) + if err != nil { + return nil, selectedNamespaces, selectedPods, selectedNamespacesPodIPs, fmt.Errorf("failed to get active network for namespace %s: %v", namespace.Name, err) + } + if netInfo.IsSecondary() { + // EIP for secondary host interfaces is not supported for secondary networks + continue + } selectedNamespaces.Insert(namespace.Name) pods, err := c.listPodsByNamespaceAndSelector(namespace.Name, &eip.Spec.PodSelector) if err != nil { @@ -1011,6 +1026,14 @@ func (c *Controller) repairNode() error { for _, namespace := range namespaces { namespaceLabels := labels.Set(namespace.Labels) if namespaceSelector.Matches(namespaceLabels) { + netInfo, err := c.getActiveNetworkForNamespace(namespace.Name) + if err != nil { + return fmt.Errorf("failed to get active network for namespace %s: %v", namespace.Name, err) + } + if netInfo.IsSecondary() { + // EIP for secondary host interfaces is not supported for secondary networks + continue + } pods, err := c.podLister.Pods(namespace.Name).List(podSelector) if err != nil { return fmt.Errorf("failed to list pods using selector %s to configure egress IP %s: %v", diff --git a/go-controller/pkg/node/controllers/egressip/egressip_test.go b/go-controller/pkg/node/controllers/egressip/egressip_test.go index 86786efa41..6eefd60b55 100644 --- a/go-controller/pkg/node/controllers/egressip/egressip_test.go +++ b/go-controller/pkg/node/controllers/egressip/egressip_test.go @@ -6,7 +6,6 @@ import ( "fmt" "hash/fnv" "net" - "os" "os/exec" "reflect" "runtime" @@ -22,8 +21,15 @@ import ( ovniptables "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/iptables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/linkmanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" + ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + "github.com/containernetworking/plugins/pkg/ns" + "github.com/containernetworking/plugins/pkg/testutils" + nadfake "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/fake" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "github.com/vishvananda/netlink" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -35,13 +41,6 @@ import ( utiliptables "k8s.io/kubernetes/pkg/util/iptables" kexec "k8s.io/utils/exec" utilnet "k8s.io/utils/net" - - "github.com/containernetworking/plugins/pkg/ns" - "github.com/containernetworking/plugins/pkg/testutils" - "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/extensions/table" - "github.com/onsi/gomega" - "github.com/vishvananda/netlink" ) // testPodConfig holds all the information needed to validate a config is applied for a pod @@ -257,8 +256,10 @@ func initController(namespaces []corev1.Namespace, pods []corev1.Pod, egressIPs kubeClient := fake.NewSimpleClientset(&corev1.NodeList{Items: []corev1.Node{getNodeObj(node, createEIPAnnot)}}, &corev1.NamespaceList{Items: namespaces}, &corev1.PodList{Items: pods}) egressIPClient := egressipfake.NewSimpleClientset(&egressipv1.EgressIPList{Items: egressIPs}) - ovnNodeClient := &util.OVNNodeClientset{KubeClient: kubeClient, EgressIPClient: egressIPClient} + nadClient := nadfake.NewSimpleClientset() + ovnNodeClient := &util.OVNNodeClientset{KubeClient: kubeClient, EgressIPClient: egressIPClient, 
NetworkAttchDefClient: nadClient} rm := routemanager.NewController() + ovnconfig.OVNKubernetesFeature.EnableMultiNetwork = true // force addition of NAD informer for node watch factory ovnconfig.OVNKubernetesFeature.EnableEgressIP = true watchFactory, err := factory.NewNodeWatchFactory(ovnNodeClient, node1Name) if err != nil { @@ -268,8 +269,12 @@ func initController(namespaces []corev1.Namespace, pods []corev1.Pod, egressIPs return nil, nil, err } linkManager := linkmanager.NewController(node1Name, v4, v6, nil) + // only CDN network is supported + getActiveNetForNsFn := func(namespace string) (util.NetInfo, error) { + return &util.DefaultNetInfo{}, nil + } c, err := NewController(&ovnkube.Kube{KClient: kubeClient}, watchFactory.EgressIPInformer(), watchFactory.NodeInformer(), watchFactory.NamespaceInformer(), - watchFactory.PodCoreInformer(), rm, v4, v6, node1Name, linkManager) + watchFactory.PodCoreInformer(), getActiveNetForNsFn, rm, v4, v6, node1Name, linkManager) if err != nil { return nil, nil, err } @@ -413,10 +418,10 @@ func runSubControllers(testNS ns.NetNS, c *Controller, wg *sync.WaitGroup, stopC // FIXME(mk) - Within GH VM, if I need to create a new NetNs. I see the following error: // "failed to create new network namespace: mount --make-rshared /run/user/1001/netns failed: "operation not permitted"" -var _ = table.DescribeTable("EgressIP selectors", +var _ = ginkgo.DescribeTable("EgressIP selectors", func(expectedEIPConfigs []eipConfig, pods []corev1.Pod, namespaces []corev1.Namespace, nodeConfig nodeConfig) { defer ginkgo.GinkgoRecover() - if os.Getenv("NOROOT") == "TRUE" { + if ovntest.NoRoot() { ginkgo.Skip("Test requires root privileges") } if !commandExists("iptables") { @@ -702,7 +707,7 @@ var _ = table.DescribeTable("EgressIP selectors", gomega.Expect(cleanupControllerFn()).ShouldNot(gomega.HaveOccurred()) gomega.Expect(cleanupNodeFn()).ShouldNot(gomega.HaveOccurred()) }, - table.Entry("configures nothing when EIPs dont select anything", + ginkgo.Entry("configures nothing when EIPs dont select anything", []eipConfig{ { eIP: newEgressIP(egressIP1Name, egressIP1IPV4, node1Name, namespace1Label, egressPodLabel), @@ -715,7 +720,7 @@ var _ = table.DescribeTable("EgressIP selectors", {dummyLink2Name, []address{{dummy2IPv4CIDR, false}}}}, }, ), - table.Entry("configures one IPv4 EIP and one Pod", + ginkgo.Entry("configures one IPv4 EIP and one Pod", []eipConfig{ { newEgressIP(egressIP1Name, egressIP1IPV4, node1Name, namespace1Label, egressPodLabel), @@ -739,7 +744,7 @@ var _ = table.DescribeTable("EgressIP selectors", {dummyLink2Name, []address{{dummy2IPv4CIDR, false}}}}, }, ), - table.Entry("configures one IPv6 EIP and one Pod", + ginkgo.Entry("configures one IPv6 EIP and one Pod", []eipConfig{ { newEgressIP(egressIP1Name, egressIP1IPV6Compressed, node1Name, namespace1Label, egressPodLabel), @@ -764,7 +769,7 @@ var _ = table.DescribeTable("EgressIP selectors", {dummyLink2Name, []address{{dummy2IPv6CIDRCompressed, false}}}}, }, ), - table.Entry("configures one uncompressed IPv6 EIP and one Pod", + ginkgo.Entry("configures one uncompressed IPv6 EIP and one Pod", []eipConfig{ { newEgressIP(egressIP1Name, egressIP1IPV6Uncompressed, node1Name, namespace1Label, egressPodLabel), @@ -789,7 +794,7 @@ var _ = table.DescribeTable("EgressIP selectors", {dummyLink2Name, []address{{dummy2IPv6CIDRCompressed, false}}}}, }, ), - table.Entry("configures one IPv4 EIP and multiple pods", + ginkgo.Entry("configures one IPv4 EIP and multiple pods", // Test pod and namespace selection - 
[]eipConfig{ { @@ -821,7 +826,7 @@ var _ = table.DescribeTable("EgressIP selectors", {dummyLink2Name, []address{{dummy2IPv4CIDR, false}}}}, }, ), - table.Entry("configures one IPv6 EIP and multiple pods", + ginkgo.Entry("configures one IPv6 EIP and multiple pods", // Test pod and namespace selection - []eipConfig{ { @@ -854,7 +859,7 @@ var _ = table.DescribeTable("EgressIP selectors", {dummyLink2Name, []address{{dummy2IPv6CIDRCompressed, false}}}}, }, ), - table.Entry("configures one IPv4 EIP and multiple namespaces and multiple pods", + ginkgo.Entry("configures one IPv4 EIP and multiple namespaces and multiple pods", []eipConfig{ { newEgressIP(egressIP1Name, egressIP1IPV4, node1Name, namespace1Label, egressPodLabel), @@ -887,7 +892,7 @@ var _ = table.DescribeTable("EgressIP selectors", {dummyLink2Name, []address{{dummy2IPv4CIDR, false}}}}, }, ), - table.Entry("configures one IPv6 EIP and multiple namespaces and multiple pods", + ginkgo.Entry("configures one IPv6 EIP and multiple namespaces and multiple pods", []eipConfig{ { newEgressIP(egressIP1Name, egressIP1IPV6Compressed, node1Name, namespace1Label, egressPodLabel), @@ -920,7 +925,7 @@ var _ = table.DescribeTable("EgressIP selectors", {dummyLink2Name, []address{{dummy2IPv6CIDRCompressed, false}}}}, }, ), - table.Entry("configures multiple IPv4 EIPs on different links, multiple namespaces and multiple pods", + ginkgo.Entry("configures multiple IPv4 EIPs on different links, multiple namespaces and multiple pods", []eipConfig{ { newEgressIP(egressIP1Name, egressIP1IPV4, node1Name, namespace1Label, egressPodLabel), @@ -974,7 +979,7 @@ var _ = table.DescribeTable("EgressIP selectors", {dummyLink4Name, []address{{dummy4IPv4CIDR, false}}}}, }, ), - table.Entry("configures multiple IPv6 EIPs on different links, multiple namespaces and multiple pods", + ginkgo.Entry("configures multiple IPv6 EIPs on different links, multiple namespaces and multiple pods", []eipConfig{ { newEgressIP(egressIP1Name, egressIP1IPV6Compressed, node1Name, namespace1Label, egressPodLabel), @@ -1036,7 +1041,7 @@ var _ = ginkgo.Describe("label to annotations migration", func() { // Test using root and a test netns because we want to test between netlink lib // and the egress IP components (link manager, route manager) defer ginkgo.GinkgoRecover() - if os.Getenv("NOROOT") == "TRUE" { + if ovntest.NoRoot() { ginkgo.Skip("Test requires root privileges") } if !commandExists("iptables") { @@ -1096,7 +1101,7 @@ var _ = ginkgo.Describe("label to annotations migration", func() { var _ = ginkgo.Describe("VRF", func() { ginkgo.It("copies routes from the VRF routing table for a link enslaved by VRF device", func() { defer ginkgo.GinkgoRecover() - if os.Getenv("NOROOT") == "TRUE" { + if ovntest.NoRoot() { ginkgo.Skip("Test requires root privileges") } if !commandExists("iptables") { @@ -1142,12 +1147,12 @@ var _ = ginkgo.Describe("VRF", func() { }) }) -var _ = table.DescribeTable("repair node", func(expectedStateFollowingClean []eipConfig, +var _ = ginkgo.DescribeTable("repair node", func(expectedStateFollowingClean []eipConfig, nodeConfigsBeforeRepair nodeConfig, pods []corev1.Pod, namespaces []corev1.Namespace) { // Test using root and a test netns because we want to test between netlink lib // and the egress IP components (link manager, route manager) defer ginkgo.GinkgoRecover() - if os.Getenv("NOROOT") == "TRUE" { + if ovntest.NoRoot() { ginkgo.Skip("Test requires root privileges") } if !commandExists("iptables") { @@ -1237,7 +1242,7 @@ var _ = table.DescribeTable("repair 
node", func(expectedStateFollowingClean []ei close(stopCh) wg.Wait() gomega.Expect(cleanupNodeFn()).ShouldNot(gomega.HaveOccurred()) -}, table.Entry("should not fail when node is clean and nothing to apply", +}, ginkgo.Entry("should not fail when node is clean and nothing to apply", []eipConfig{}, nodeConfig{ linkConfigs: []linkConfig{ @@ -1246,7 +1251,7 @@ var _ = table.DescribeTable("repair node", func(expectedStateFollowingClean []ei }, []corev1.Pod{newPodWithLabels(namespace1, pod1Name, node1Name, pod1IPv4, map[string]string{})}, []corev1.Namespace{newNamespaceWithLabels(namespace1, namespace1Label)}), - table.Entry("should remove stale route with no assigned IP", + ginkgo.Entry("should remove stale route with no assigned IP", []eipConfig{}, nodeConfig{ // node state before repair routes: []netlink.Route{getDefaultIPv4Route(getLinkIndex(dummyLink1Name))}, @@ -1256,7 +1261,7 @@ var _ = table.DescribeTable("repair node", func(expectedStateFollowingClean []ei }, []corev1.Pod{}, []corev1.Namespace{}), - table.Entry("should remove stale address", + ginkgo.Entry("should remove stale address", []eipConfig{}, nodeConfig{ // node state before repair linkConfigs: []linkConfig{ @@ -1265,7 +1270,7 @@ var _ = table.DescribeTable("repair node", func(expectedStateFollowingClean []ei }, []corev1.Pod{}, []corev1.Namespace{}), - table.Entry("should remove stale route and EIP address on wrong link", + ginkgo.Entry("should remove stale route and EIP address on wrong link", []eipConfig{ { eIP: newEgressIP(egressIP1Name, egressIP1IPV4, node1Name, namespace1Label, egressPodLabel), @@ -1280,7 +1285,7 @@ var _ = table.DescribeTable("repair node", func(expectedStateFollowingClean []ei }, []corev1.Pod{}, []corev1.Namespace{}), - table.Entry("should remove stale iptables rules", + ginkgo.Entry("should remove stale iptables rules", []eipConfig{ { eIP: newEgressIP(egressIP1Name, egressIP1IPV4, node1Name, namespace1Label, egressPodLabel), @@ -1292,7 +1297,7 @@ var _ = table.DescribeTable("repair node", func(expectedStateFollowingClean []ei }, []corev1.Pod{}, []corev1.Namespace{}), - table.Entry("should remove stale iptables rules but not valid rules", + ginkgo.Entry("should remove stale iptables rules but not valid rules", []eipConfig{ { eIP: newEgressIP(egressIP1Name, egressIP1IPV4, node1Name, namespace1Label, egressPodLabel), diff --git a/go-controller/pkg/node/controllers/egressip/namespace.go b/go-controller/pkg/node/controllers/egressip/namespace.go index 096589fc2e..4191733b80 100644 --- a/go-controller/pkg/node/controllers/egressip/namespace.go +++ b/go-controller/pkg/node/controllers/egressip/namespace.go @@ -75,22 +75,21 @@ func (c *Controller) runNamespaceWorker(wg *sync.WaitGroup) { func (c *Controller) processNextNamespaceWorkItem(wg *sync.WaitGroup) bool { wg.Add(1) defer wg.Done() - obj, shutdown := c.namespaceQueue.Get() + ns, shutdown := c.namespaceQueue.Get() if shutdown { return false } - defer c.namespaceQueue.Done(obj) - ns := obj.(*corev1.Namespace) + defer c.namespaceQueue.Done(ns) if err := c.syncNamespace(ns); err != nil { - if c.namespaceQueue.NumRequeues(obj) < maxRetries { - klog.V(4).Infof("Error found while processing namespace %s: %v", obj.(*corev1.Namespace), err) - c.namespaceQueue.AddRateLimited(obj) + if c.namespaceQueue.NumRequeues(ns) < maxRetries { + klog.V(4).Infof("Error found while processing namespace %q: %v", ns.Name, err) + c.namespaceQueue.AddRateLimited(ns) return true } - klog.Errorf("Dropping namespace %q out of the queue: %v", obj.(*corev1.Namespace).Name, err) + 
klog.Errorf("Dropping namespace %q out of the queue: %v", ns.Name, err) utilruntime.HandleError(err) } - c.namespaceQueue.Forget(obj) + c.namespaceQueue.Forget(ns) return true } diff --git a/go-controller/pkg/node/controllers/egressip/pod.go b/go-controller/pkg/node/controllers/egressip/pod.go index f533cc6156..33a1ed98ea 100644 --- a/go-controller/pkg/node/controllers/egressip/pod.go +++ b/go-controller/pkg/node/controllers/egressip/pod.go @@ -173,7 +173,7 @@ func (c *Controller) onPodUpdate(oldObj, newObj interface{}) { reflect.DeepEqual(o.Status.PodIPs, n.Status.PodIPs) { return } - c.podQueue.Add(newObj) + c.podQueue.Add(n) } func (c *Controller) onPodDelete(obj interface{}) { @@ -203,22 +203,21 @@ func (c *Controller) runPodWorker(wg *sync.WaitGroup) { func (c *Controller) processNextPodWorkItem(wg *sync.WaitGroup) bool { wg.Add(1) defer wg.Done() - obj, shutdown := c.podQueue.Get() + p, shutdown := c.podQueue.Get() if shutdown { return false } - defer c.podQueue.Done(obj) - p := obj.(*corev1.Pod) + defer c.podQueue.Done(p) if err := c.syncPod(p); err != nil { - if c.podQueue.NumRequeues(obj) < maxRetries { + if c.podQueue.NumRequeues(p) < maxRetries { klog.V(4).Infof("Error found while processing pod %s/%s: %v", p.Namespace, p.Name, err) - c.podQueue.AddRateLimited(obj) + c.podQueue.AddRateLimited(p) return true } klog.Warningf("Dropping pod %s/%s out of the queue: %s", p.Namespace, p.Name, err) utilruntime.HandleError(err) } - c.podQueue.Forget(obj) + c.podQueue.Forget(p) return true } diff --git a/go-controller/pkg/node/controllers/egressip/suite_test.go b/go-controller/pkg/node/controllers/egressip/suite_test.go index 78a20def19..3b87108f18 100644 --- a/go-controller/pkg/node/controllers/egressip/suite_test.go +++ b/go-controller/pkg/node/controllers/egressip/suite_test.go @@ -1,7 +1,7 @@ package egressip import ( - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "testing" ) diff --git a/go-controller/pkg/node/controllers/egressservice/egressservice_node.go b/go-controller/pkg/node/controllers/egressservice/egressservice_node.go index c9bf1d7d6a..f3c5e16bf7 100644 --- a/go-controller/pkg/node/controllers/egressservice/egressservice_node.go +++ b/go-controller/pkg/node/controllers/egressservice/egressservice_node.go @@ -55,7 +55,7 @@ type Controller struct { egressServiceLister egressservicelisters.EgressServiceLister egressServiceSynced cache.InformerSynced - egressServiceQueue workqueue.RateLimitingInterface + egressServiceQueue workqueue.TypedRateLimitingInterface[string] serviceLister corelisters.ServiceLister servicesSynced cache.InformerSynced @@ -94,9 +94,9 @@ func NewController(stopCh <-chan struct{}, returnMark, thisNode string, c.egressServiceLister = esInformer.Lister() c.egressServiceSynced = esInformer.Informer().HasSynced - c.egressServiceQueue = workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(1*time.Second, 5*time.Second, 5), - "egressservices", + c.egressServiceQueue = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "egressservices"}, ) _, err := esInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ AddFunc: c.onEgressServiceAdd, @@ -613,6 +613,11 @@ func (c *Controller) repairIPTables(v4EpsToServices, v6EpsToServices map[string] errorList := []error{} for _, rule := range rulesToDel { args := strings.Fields(rule) 
+ if len(args) < 2 { + continue + } + // strip "-A OVN-KUBE-EGRESS-SVC" + args = args[2:] err := ipt.Delete("nat", Chain, args...) if err != nil { errorList = append(errorList, err) @@ -677,7 +682,7 @@ func (c *Controller) processNextEgressServiceWorkItem(wg *sync.WaitGroup) bool { defer c.egressServiceQueue.Done(key) - err := c.syncEgressService(key.(string)) + err := c.syncEgressService(key) if err == nil { c.egressServiceQueue.Forget(key) return true diff --git a/go-controller/pkg/node/controllers/egressservice/egressservice_node_endpointslice.go b/go-controller/pkg/node/controllers/egressservice/egressservice_node_endpointslice.go index 91552e5a12..d248e216f9 100644 --- a/go-controller/pkg/node/controllers/egressservice/egressservice_node_endpointslice.go +++ b/go-controller/pkg/node/controllers/egressservice/egressservice_node_endpointslice.go @@ -62,7 +62,7 @@ func (c *Controller) onEndpointSliceDelete(obj interface{}) { func (c *Controller) queueServiceForEndpointSlice(endpointSlice *discovery.EndpointSlice) { - key, err := services.ServiceControllerKey(endpointSlice) + key, err := services.GetServiceKeyFromEndpointSliceForDefaultNetwork(endpointSlice) if err != nil { // Do not log endpointsSlices missing service labels as errors. // Once the service label is eventually added, we will get this event diff --git a/go-controller/pkg/node/default_node_network_controller.go b/go-controller/pkg/node/default_node_network_controller.go index 58280d3ac7..b5d2548800 100644 --- a/go-controller/pkg/node/default_node_network_controller.go +++ b/go-controller/pkg/node/default_node_network_controller.go @@ -25,6 +25,7 @@ import ( "github.com/containernetworking/plugins/pkg/ip" v1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + honode "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/controller" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni" config "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" @@ -32,6 +33,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/informer" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + nad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/controllers/egressip" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/controllers/egressservice" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/linkmanager" @@ -105,6 +107,7 @@ type DefaultNodeNetworkController struct { // Node healthcheck server for cloud load balancers healthzServer *proxierHealthUpdater routeManager *routemanager.Controller + linkManager *linkmanager.Controller // retry framework for namespaces, used for the removal of stale conntrack entries for external gateways retryNamespaces *retry.RetryFramework @@ -112,12 +115,27 @@ type DefaultNodeNetworkController struct { retryEndpointSlices *retry.RetryFramework apbExternalRouteNodeController *apbroute.ExternalGatewayNodeController + + nadController *nad.NetAttachDefinitionController + + cniServer *cni.Server + + gatewaySetup *preStartSetup + + udnHostIsolationManager *UDNHostIsolationManager +} + +type preStartSetup struct { + mgmtPorts []managementPortEntry + mgmtPortConfig *managementPortConfig + nodeAddress net.IP + sbZone string } func newDefaultNodeNetworkController(cnnci *CommonNodeNetworkControllerInfo, stopChan chan struct{}, - wg *sync.WaitGroup, routeManager 
*routemanager.Controller) *DefaultNodeNetworkController { + wg *sync.WaitGroup, routeManager *routemanager.Controller, nadController *nad.NetAttachDefinitionController) *DefaultNodeNetworkController { - return &DefaultNodeNetworkController{ + c := &DefaultNodeNetworkController{ BaseNodeNetworkController: BaseNodeNetworkController{ CommonNodeNetworkControllerInfo: *cnnci, NetInfo: &util.DefaultNetInfo{}, @@ -126,14 +144,20 @@ func newDefaultNodeNetworkController(cnnci *CommonNodeNetworkControllerInfo, sto }, routeManager: routeManager, } + if util.IsNetworkSegmentationSupportEnabled() && !config.OVNKubernetesFeature.DisableUDNHostIsolation { + c.udnHostIsolationManager = NewUDNHostIsolationManager(config.IPv4Mode, config.IPv6Mode, + cnnci.watchFactory.PodCoreInformer(), nadController) + } + c.linkManager = linkmanager.NewController(cnnci.name, config.IPv4Mode, config.IPv6Mode, c.updateGatewayMAC) + return c } // NewDefaultNodeNetworkController creates a new network controller for node management of the default network -func NewDefaultNodeNetworkController(cnnci *CommonNodeNetworkControllerInfo) (*DefaultNodeNetworkController, error) { +func NewDefaultNodeNetworkController(cnnci *CommonNodeNetworkControllerInfo, nadController *nad.NetAttachDefinitionController) (*DefaultNodeNetworkController, error) { var err error stopChan := make(chan struct{}) wg := &sync.WaitGroup{} - nc := newDefaultNodeNetworkController(cnnci, stopChan, wg, cnnci.routeManager) + nc := newDefaultNodeNetworkController(cnnci, stopChan, wg, cnnci.routeManager, nadController) if len(config.Kubernetes.HealthzBindAddress) != 0 { klog.Infof("Enable node proxy healthz server on %s", config.Kubernetes.HealthzBindAddress) @@ -153,6 +177,8 @@ func NewDefaultNodeNetworkController(cnnci *CommonNodeNetworkControllerInfo) (*D return nil, err } + nc.nadController = nadController + nc.initRetryFrameworkForNode() return nc, nil @@ -311,7 +337,6 @@ func setupOVNNode(node *kapi.Node) error { // to finish computation specially with complex acl configuration with port range. fmt.Sprintf("other_config:bundle-idle-timeout=%d", config.Default.OpenFlowProbe), - fmt.Sprintf("external_ids:hostname=\"%s\"", node.Name), // If Interconnect feature is enabled, we want to tell ovn-controller to // make this node/chassis as an interconnect gateway. fmt.Sprintf("external_ids:ovn-is-interconn=%s", strconv.FormatBool(config.OVNKubernetesFeature.EnableInterconnect)), @@ -334,6 +359,12 @@ func setupOVNNode(node *kapi.Node) error { ) } + // In the case of DPU, the hostname should be that of the DPU and not + // the K8s Node's. So skip setting the incorrect hostname. + if config.OvnKubeNode.Mode != types.NodeModeDPU { + setExternalIdsCmd = append(setExternalIdsCmd, fmt.Sprintf("external_ids:hostname=\"%s\"", node.Name)) + } + _, stderr, err := util.RunOVSVsctl(setExternalIdsCmd...) if err != nil { return fmt.Errorf("error setting OVS external IDs: %v\n %q", err, stderr) @@ -688,16 +719,19 @@ func portExists(namespace, name string) bool { /** HACK END **/ -// Start learns the subnets assigned to it by the master controller -// and calls the SetupNode script which establishes the logical switch -func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { - klog.Infof("Starting the default node network controller") +// PreStart executes the first steps to start the DefaultNodeNetworkController. 
+// It is split from Start() and executed before SecondaryNodeNetworkController (SNNC), +// to allow SNNC to reference the openflow manager created in PreStart. +func (nc *DefaultNodeNetworkController) PreStart(ctx context.Context) error { + klog.Infof("PreStarting the default node network controller") var err error var node *kapi.Node var subnets []*net.IPNet var cniServer *cni.Server + gatewaySetup := &preStartSetup{} + // Setting debug log level during node bring up to expose bring up process. // Log level is returned to configured value when bring up is complete. var level klog.Level @@ -705,13 +739,17 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { klog.Errorf("Setting klog \"loglevel\" to 5 failed, err: %v", err) } - if err = configureGlobalForwarding(); err != nil { - return err + if config.OvnKubeNode.Mode != types.NodeModeDPU { + if err = configureGlobalForwarding(); err != nil { + return err + } } - // Bootstrap flows in OVS if just normal flow is present - if err := bootstrapOVSFlows(nc.name); err != nil { - return fmt.Errorf("failed to bootstrap OVS flows: %w", err) + if config.OvnKubeNode.Mode != types.NodeModeDPUHost { + // Bootstrap flows in OVS if just normal flow is present + if err := bootstrapOVSFlows(nc.name); err != nil { + return fmt.Errorf("failed to bootstrap OVS flows: %w", err) + } } if node, err = nc.Kube.GetNode(nc.name); err != nil { @@ -766,6 +804,15 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { if err != nil { return err } + if nc.udnHostIsolationManager != nil { + if err = nc.udnHostIsolationManager.Start(ctx); err != nil { + return err + } + } else { + if err = CleanupUDNHostIsolation(); err != nil { + return fmt.Errorf("failed cleaning up UDN host isolation: %w", err) + } + } } // First wait for the node logical switch to be created by the Master, timeout is 300s. @@ -792,10 +839,80 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { if !ok { return fmt.Errorf("cannot get kubeclient for starting CNI server") } - cniServer, err = cni.NewCNIServer(nc.watchFactory, kclient.KClient) + cniServer, err = cni.NewCNIServer(nc.watchFactory, kclient.KClient, nc.nadController) if err != nil { return err } + nc.cniServer = cniServer + } + + nodeAnnotator := kube.NewNodeAnnotator(nc.Kube, node.Name) + waiter := newStartupWaiter() + + // Setup management ports + mgmtPorts, mgmtPortConfig, err := createNodeManagementPorts(node, nc.watchFactory.NodeCoreInformer().Lister(), nodeAnnotator, + nc.Kube, waiter, subnets, nc.routeManager) + if err != nil { + return err + } + gatewaySetup.mgmtPorts = mgmtPorts + gatewaySetup.mgmtPortConfig = mgmtPortConfig + + if err := util.SetNodeZone(nodeAnnotator, sbZone); err != nil { + return fmt.Errorf("failed to set node zone annotation for node %s: %w", nc.name, err) + } + if err := nodeAnnotator.Run(); err != nil { + return fmt.Errorf("failed to set node %s annotations: %w", nc.name, err) + } + + // Connect ovn-controller to SBDB + for _, auth := range []config.OvnAuthConfig{config.OvnNorth, config.OvnSouth} { + if err := auth.SetDBAuth(); err != nil { + return fmt.Errorf("unable to set the authentication towards OVN local dbs") + } + } + + // First part of gateway initialization. 
It will be completed by (nc *DefaultNodeNetworkController) Start() + if config.OvnKubeNode.Mode != types.NodeModeDPUHost { + // Initialize gateway for OVS internal port or representor management port + gw, err := nc.initGatewayPreStart(subnets, nodeAnnotator, mgmtPortConfig, nodeAddr) + if err != nil { + return err + } + nc.Gateway = gw + } + + if err := level.Set(strconv.Itoa(config.Logging.Level)); err != nil { + klog.Errorf("Reset of initial klog \"loglevel\" failed, err: %v", err) + } + gatewaySetup.sbZone = sbZone + nc.gatewaySetup = gatewaySetup + + return nil + +} + +// Start learns the subnets assigned to it by the master controller +// and calls the SetupNode script which establishes the logical switch +func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { + klog.Infof("Starting the default node network controller") + + var err error + var node *kapi.Node + + if nc.gatewaySetup == nil { + return fmt.Errorf("default node network controller hasn't been pre-started") + } + + // Setting debug log level during node bring up to expose bring up process. + // Log level is returned to configured value when bring up is complete. + var level klog.Level + if err := level.Set("5"); err != nil { + klog.Errorf("Setting klog \"loglevel\" to 5 failed, err: %v", err) + } + + if node, err = nc.Kube.GetNode(nc.name); err != nil { + return fmt.Errorf("error retrieving node %s: %v", nc.name, err) } nodeAnnotator := kube.NewNodeAnnotator(nc.Kube, node.Name) @@ -821,34 +938,19 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { config.OvnKubeNode.MgmtPortNetdev, config.OvnKubeNode.MgmtPortDPResourceName) } - // Setup management ports - mgmtPorts, mgmtPortConfig, err := createNodeManagementPorts(node, nc.watchFactory.NodeCoreInformer().Lister(), nodeAnnotator, - nc.Kube, waiter, subnets, nc.routeManager) - if err != nil { - return err - } - - // Initialize gateway + // Complete gateway initialization if config.OvnKubeNode.Mode == types.NodeModeDPUHost { - err = nc.initGatewayDPUHost(nodeAddr) + err = nc.initGatewayDPUHost(nc.gatewaySetup.nodeAddress) if err != nil { return err } } else { - // Initialize gateway for OVS internal port or representor management port - if err := nc.initGateway(subnets, nodeAnnotator, waiter, mgmtPortConfig, nodeAddr); err != nil { + gw := nc.Gateway.(*gateway) + if err := nc.initGatewayMainStart(gw, waiter); err != nil { return err } } - if err := util.SetNodeZone(nodeAnnotator, sbZone); err != nil { - return fmt.Errorf("failed to set node zone annotation for node %s: %w", nc.name, err) - } - - if err := nodeAnnotator.Run(); err != nil { - return fmt.Errorf("failed to set node %s annotations: %w", nc.name, err) - } - // If EncapPort is not the default tell sbdb to use specified port. // We set the encap port after annotating the zone name so that ovnkube-controller has come up // and configured the chassis in SBDB (ovnkube-controller waits for ovnkube-node to set annotation @@ -882,7 +984,7 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { // plumbing (takes 80ms based on what we saw in CI runs so we might still have that small window of disruption). // NOTE: ovnkube-node in DPU host mode doesn't go through upgrades for OVN-IC and has no SBDB to connect to. Thus this part shall be skipped. 
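Editor's note: the PreStart()/Start() split above is a two-phase initialization. PreStart stashes everything the second phase needs (management ports, SB zone, the partially initialized gateway) in the controller's preStartSetup struct, and Start refuses to run until that struct exists. A minimal sketch of the guard pattern, with hypothetical names standing in for the controller's real fields:

package main

import (
	"errors"
	"fmt"
)

// preStartState stands in for the controller's preStartSetup struct.
type preStartState struct {
	sbZone string
}

type controller struct {
	setup *preStartState // nil until PreStart has run
}

// PreStart performs the early half of bring-up and records its results
// for the second phase.
func (c *controller) PreStart() error {
	c.setup = &preStartState{sbZone: "global"}
	return nil
}

// Start completes bring-up and fails fast if PreStart was skipped.
func (c *controller) Start() error {
	if c.setup == nil {
		return errors.New("controller hasn't been pre-started")
	}
	fmt.Println("starting with SB zone", c.setup.sbZone)
	return nil
}

func main() {
	c := &controller{}
	if err := c.PreStart(); err != nil { // must run before Start
		panic(err)
	}
	if err := c.Start(); err != nil {
		panic(err)
	}
}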
var syncNodes, syncServices, syncPods bool - if config.OvnKubeNode.Mode != types.NodeModeDPUHost && config.OVNKubernetesFeature.EnableInterconnect && sbZone != types.OvnDefaultZone && !util.HasNodeMigratedZone(node) { // so this should be done only once in phase2 (not in phase1) + if config.OvnKubeNode.Mode != types.NodeModeDPUHost && config.OVNKubernetesFeature.EnableInterconnect && nc.gatewaySetup.sbZone != types.OvnDefaultZone && !util.HasNodeMigratedZone(node) { klog.Info("Upgrade Hack: Interconnect is enabled") var err1 error start := time.Now() @@ -964,7 +1066,7 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { if err != nil { return fmt.Errorf("upgrade hack: failed while waiting for the remote ovnkube-controller to be ready: %v, %v", err, err1) } - if err := util.SetNodeZoneMigrated(nodeAnnotator, sbZone); err != nil { + if err := util.SetNodeZoneMigrated(nodeAnnotator, nc.gatewaySetup.sbZone); err != nil { return fmt.Errorf("upgrade hack: failed to set node zone annotation for node %s: %w", nc.name, err) } if err := nodeAnnotator.Run(); err != nil { @@ -1001,7 +1103,6 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { return err } } - } if config.HybridOverlay.Enabled { @@ -1040,7 +1141,7 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { } // start management ports health check - for _, mgmtPort := range mgmtPorts { + for _, mgmtPort := range nc.gatewaySetup.mgmtPorts { mgmtPort.port.CheckManagementPortHealth(nc.routeManager, mgmtPort.config, nc.stopChan) if config.OVNKubernetesFeature.EnableEgressIP { // Start the health checking server used by egressip, if EgressIPNodeHealthCheckPort is specified @@ -1056,7 +1157,7 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { // "k8s.ovn.org/external-gw-pod-ips". In that case, we need ovnkube-node to flush // conntrack on every node. 
In multi-zone-interconnect case, we will handle the flushing // directly on the ovnkube-controller code to avoid an extra namespace annotation - if !config.OVNKubernetesFeature.EnableInterconnect || sbZone == types.OvnDefaultZone { + if !config.OVNKubernetesFeature.EnableInterconnect || nc.gatewaySetup.sbZone == types.OvnDefaultZone { err := nc.WatchNamespaces() if err != nil { return fmt.Errorf("failed to watch namespaces: %w", err) @@ -1082,8 +1183,10 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { } } else { // start the cni server - if err := cniServer.Start(cni.ServerRunDir); err != nil { - return err + if nc.cniServer != nil { + if err := nc.cniServer.Start(cni.ServerRunDir); err != nil { + return err + } } // Write CNI config file if it doesn't already exist @@ -1109,13 +1212,10 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { } } - // create link manager, will work for egress IP as well as monitoring MAC changes to default gw bridge - linkManager := linkmanager.NewController(nc.name, config.IPv4Mode, config.IPv6Mode, nc.updateGatewayMAC) - if config.OVNKubernetesFeature.EnableEgressIP && !util.PlatformTypeIsEgressIPCloudProvider() { c, err := egressip.NewController(nc.Kube, nc.watchFactory.EgressIPInformer(), nc.watchFactory.NodeInformer(), - nc.watchFactory.NamespaceInformer(), nc.watchFactory.PodCoreInformer(), nc.routeManager, config.IPv4Mode, - config.IPv6Mode, nc.name, linkManager) + nc.watchFactory.NamespaceInformer(), nc.watchFactory.PodCoreInformer(), nc.nadController.GetActiveNetworkForNamespace, + nc.routeManager, config.IPv4Mode, config.IPv6Mode, nc.name, nc.linkManager) if err != nil { return fmt.Errorf("failed to create egress IP controller: %v", err) } @@ -1126,7 +1226,7 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { klog.Infof("Egress IP for secondary host network is disabled") } - linkManager.Run(nc.stopChan, nc.wg) + nc.linkManager.Run(nc.stopChan, nc.wg) nc.wg.Add(1) go func() { @@ -1346,6 +1446,19 @@ func DummyNextHopIPs() []net.IP { return nextHops } +// DummyMasqueradeIPs returns the fake host masquerade IPs used for service traffic routing. +// It is used in: br-ex, where we SNAT the traffic destined towards a service IP +func DummyMasqueradeIPs() []net.IP { + var nextHops []net.IP + if config.IPv4Mode { + nextHops = append(nextHops, config.Gateway.MasqueradeIPs.V4HostMasqueradeIP) + } + if config.IPv6Mode { + nextHops = append(nextHops, config.Gateway.MasqueradeIPs.V6HostMasqueradeIP) + } + return nextHops +} + // configureGlobalForwarding configures the global forwarding settings. // It sets the FORWARD policy to DROP/ACCEPT based on the config.Gateway.DisableForwarding value for all enabled IP families. // For IPv6 it additionally always enables the global forwarding. diff --git a/go-controller/pkg/node/default_node_network_controller_test.go b/go-controller/pkg/node/default_node_network_controller_test.go index 5182a5693d..c1b83f7c86 100644 --- a/go-controller/pkg/node/default_node_network_controller_test.go +++ b/go-controller/pkg/node/default_node_network_controller_test.go @@ -19,7 +19,7 @@ import ( kapi "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) @@ -278,12 +278,12 @@ var _ = Describe("Node", func() { "external_ids:ovn-remote-probe-interval=%d "+ "external_ids:ovn-openflow-probe-interval=%d "+ "other_config:bundle-idle-timeout=%d "+ - "external_ids:hostname=\"%s\" "+ "external_ids:ovn-is-interconn=false "+ "external_ids:ovn-monitor-all=true "+ "external_ids:ovn-ofctrl-wait-before-clear=0 "+ "external_ids:ovn-enable-lflow-cache=true "+ - "external_ids:ovn-set-local-ip=\"true\"", + "external_ids:ovn-set-local-ip=\"true\" "+ + "external_ids:hostname=\"%s\"", nodeIP, interval, ofintval, ofintval, nodeName), }) fexec.AddFakeCmd(&ovntest.ExpectedCmd{ @@ -299,6 +299,7 @@ var _ = Describe("Node", func() { _, err = config.InitConfig(ctx, fexec, nil) Expect(err).NotTo(HaveOccurred()) + config.OvnKubeNode.Mode = types.NodeModeFull err = setupOVNNode(&node) Expect(err).NotTo(HaveOccurred()) @@ -383,14 +384,14 @@ var _ = Describe("Node", func() { "external_ids:ovn-remote-probe-interval=%d "+ "external_ids:ovn-openflow-probe-interval=%d "+ "other_config:bundle-idle-timeout=%d "+ - "external_ids:hostname=\"%s\" "+ "external_ids:ovn-is-interconn=false "+ "external_ids:ovn-monitor-all=true "+ "external_ids:ovn-ofctrl-wait-before-clear=0 "+ "external_ids:ovn-enable-lflow-cache=false "+ "external_ids:ovn-set-local-ip=\"true\" "+ "external_ids:ovn-limit-lflow-cache=1000 "+ - "external_ids:ovn-memlimit-lflow-cache-kb=100000", + "external_ids:ovn-memlimit-lflow-cache-kb=100000 "+ + "external_ids:hostname=\"%s\"", nodeIP, interval, ofintval, ofintval, nodeName), }) fexec.AddFakeCmd(&ovntest.ExpectedCmd{ @@ -409,6 +410,7 @@ var _ = Describe("Node", func() { config.Default.LFlowCacheEnable = false config.Default.LFlowCacheLimit = 1000 config.Default.LFlowCacheLimitKb = 100000 + config.OvnKubeNode.Mode = types.NodeModeFull err = setupOVNNode(&node) Expect(err).NotTo(HaveOccurred()) @@ -452,12 +454,12 @@ var _ = Describe("Node", func() { "external_ids:ovn-remote-probe-interval=%d "+ "external_ids:ovn-openflow-probe-interval=%d "+ "other_config:bundle-idle-timeout=%d "+ - "external_ids:hostname=\"%s\" "+ "external_ids:ovn-is-interconn=false "+ "external_ids:ovn-monitor-all=true "+ "external_ids:ovn-ofctrl-wait-before-clear=0 "+ "external_ids:ovn-enable-lflow-cache=true "+ - "external_ids:ovn-set-local-ip=\"true\"", + "external_ids:ovn-set-local-ip=\"true\" "+ + "external_ids:hostname=\"%s\"", nodeIP, interval, ofintval, ofintval, nodeName), }) @@ -527,12 +529,12 @@ var _ = Describe("Node", func() { "external_ids:ovn-remote-probe-interval=%d "+ "external_ids:ovn-openflow-probe-interval=%d "+ "other_config:bundle-idle-timeout=%d "+ - "external_ids:hostname=\"%s\" "+ "external_ids:ovn-is-interconn=false "+ "external_ids:ovn-monitor-all=true "+ "external_ids:ovn-ofctrl-wait-before-clear=0 "+ "external_ids:ovn-enable-lflow-cache=true "+ - "external_ids:ovn-set-local-ip=\"true\"", + "external_ids:ovn-set-local-ip=\"true\" "+ + "external_ids:hostname=\"%s\"", nodeIP, interval, ofintval, ofintval, nodeName), }) @@ -602,12 +604,12 @@ var _ = Describe("Node", func() { "external_ids:ovn-remote-probe-interval=%d "+ "external_ids:ovn-openflow-probe-interval=%d "+ "other_config:bundle-idle-timeout=%d "+ - "external_ids:hostname=\"%s\" "+ "external_ids:ovn-is-interconn=false "+ "external_ids:ovn-monitor-all=true "+ "external_ids:ovn-ofctrl-wait-before-clear=0 "+ "external_ids:ovn-enable-lflow-cache=true "+ - "external_ids:ovn-set-local-ip=\"true\"", + "external_ids:ovn-set-local-ip=\"true\" "+ + "external_ids:hostname=\"%s\"", nodeIP, interval, 
ofintval, ofintval, nodeName), }) diff --git a/go-controller/pkg/node/egress_service_test.go b/go-controller/pkg/node/egress_service_test.go index 83d314a61b..5c1b15ac52 100644 --- a/go-controller/pkg/node/egress_service_test.go +++ b/go-controller/pkg/node/egress_service_test.go @@ -5,7 +5,7 @@ import ( "fmt" "net" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" egressserviceapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1" @@ -281,8 +281,8 @@ var _ = Describe("Egress Service Operations", func() { expectedTables := map[string]util.FakeTable{ "nat": { "OVN-KUBE-EGRESS-SVC": []string{ - "-A OVN-KUBE-EGRESS-SVC -m mark --mark 0x3f0 -m comment --comment DoNotSNAT -j RETURN", - "-A OVN-KUBE-EGRESS-SVC -s 10.128.0.3 -m comment --comment namespace1/service1 -j SNAT --to-source 5.5.5.5", + "-m mark --mark 0x3f0 -m comment --comment DoNotSNAT -j RETURN", + "-s 10.128.0.3 -m comment --comment namespace1/service1 -j SNAT --to-source 5.5.5.5", }, }, "filter": {}, diff --git a/go-controller/pkg/node/gateway.go b/go-controller/pkg/node/gateway.go index c24a11596c..2b375d7690 100644 --- a/go-controller/pkg/node/gateway.go +++ b/go-controller/pkg/node/gateway.go @@ -6,21 +6,20 @@ import ( "sync" "time" + "github.com/safchain/ethtool" + kapi "k8s.io/api/core/v1" + discovery "k8s.io/api/discovery/v1" + "k8s.io/klog/v2" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + egressipv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/informer" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" - util "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" - - "github.com/safchain/ethtool" - kapi "k8s.io/api/core/v1" - discovery "k8s.io/api/discovery/v1" - "k8s.io/klog/v2" ) // Gateway responds to Service and Endpoint K8s events @@ -44,11 +43,12 @@ type gateway struct { // nodePortWatcherIptables is used in Shared GW mode to handle nodePort IPTable rules nodePortWatcherIptables informer.ServiceEventHandler // nodePortWatcher is used in Local+Shared GW modes to handle nodePort flows in shared OVS bridge - nodePortWatcher informer.ServiceAndEndpointsEventHandler - openflowManager *openflowManager - nodeIPManager *addressManager - initFunc func() error - readyFunc func() (bool, error) + nodePortWatcher informer.ServiceAndEndpointsEventHandler + openflowManager *openflowManager + nodeIPManager *addressManager + bridgeEIPAddrManager *bridgeEIPAddrManager + initFunc func() error + readyFunc func() (bool, error) servicesRetryFramework *retry.RetryFramework @@ -166,8 +166,12 @@ func (g *gateway) AddEndpointSlice(epSlice *discovery.EndpointSlice) error { var errors []error if g.loadBalancerHealthChecker != nil { - if err = g.loadBalancerHealthChecker.AddEndpointSlice(epSlice); err != nil { - errors = append(errors, err) + // Filter out objects without the default serviceName label to exclude mirrored EndpointSlices + // Only default EndpointSlices contain the discovery.LabelServiceName label + if 
!util.IsNetworkSegmentationSupportEnabled() || epSlice.Labels[discovery.LabelServiceName] != "" { + if err = g.loadBalancerHealthChecker.AddEndpointSlice(epSlice); err != nil { + errors = append(errors, err) + } } } if g.nodePortWatcher != nil { @@ -184,8 +188,12 @@ func (g *gateway) UpdateEndpointSlice(oldEpSlice, newEpSlice *discovery.Endpoint var errors []error if g.loadBalancerHealthChecker != nil { - if err = g.loadBalancerHealthChecker.UpdateEndpointSlice(oldEpSlice, newEpSlice); err != nil { - errors = append(errors, err) + // Filter out objects without the default serviceName label to exclude mirrored EndpointSlices + // Only default EndpointSlices contain the discovery.LabelServiceName label + if !util.IsNetworkSegmentationSupportEnabled() || newEpSlice.Labels[discovery.LabelServiceName] != "" { + if err = g.loadBalancerHealthChecker.UpdateEndpointSlice(oldEpSlice, newEpSlice); err != nil { + errors = append(errors, err) + } } } if g.nodePortWatcher != nil { @@ -202,8 +210,12 @@ func (g *gateway) DeleteEndpointSlice(epSlice *discovery.EndpointSlice) error { var errors []error if g.loadBalancerHealthChecker != nil { - if err = g.loadBalancerHealthChecker.DeleteEndpointSlice(epSlice); err != nil { - errors = append(errors, err) + // Filter out objects without the default serviceName label to exclude mirrored EndpointSlices + // Only default EndpointSlices contain the discovery.LabelServiceName label + if !util.IsNetworkSegmentationSupportEnabled() || epSlice.Labels[discovery.LabelServiceName] != "" { + if err = g.loadBalancerHealthChecker.DeleteEndpointSlice(epSlice); err != nil { + errors = append(errors, err) + } } } if g.nodePortWatcher != nil { @@ -212,7 +224,71 @@ func (g *gateway) DeleteEndpointSlice(epSlice *discovery.EndpointSlice) error { } } return utilerrors.Join(errors...) 
+} +func (g *gateway) AddEgressIP(eip *egressipv1.EgressIP) error { + if !util.IsNetworkSegmentationSupportEnabled() || !config.OVNKubernetesFeature.EnableInterconnect || config.Gateway.Mode == config.GatewayModeDisabled { + return nil + } + isSyncRequired, err := g.bridgeEIPAddrManager.addEgressIP(eip) + if err != nil { + return err + } + if isSyncRequired { + if err = g.Reconcile(); err != nil { + return fmt.Errorf("failed to sync gateway: %v", err) + } + g.openflowManager.requestFlowSync() + } + return nil +} + +func (g *gateway) UpdateEgressIP(oldEIP, newEIP *egressipv1.EgressIP) error { + if !util.IsNetworkSegmentationSupportEnabled() || !config.OVNKubernetesFeature.EnableInterconnect || config.Gateway.Mode == config.GatewayModeDisabled { + return nil + } + isSyncRequired, err := g.bridgeEIPAddrManager.updateEgressIP(oldEIP, newEIP) + if err != nil { + return err + } + if isSyncRequired { + if err = g.Reconcile(); err != nil { + return fmt.Errorf("failed to sync gateway: %v", err) + } + g.openflowManager.requestFlowSync() + } + return nil +} + +func (g *gateway) DeleteEgressIP(eip *egressipv1.EgressIP) error { + if !util.IsNetworkSegmentationSupportEnabled() || !config.OVNKubernetesFeature.EnableInterconnect || config.Gateway.Mode == config.GatewayModeDisabled { + return nil + } + isSyncRequired, err := g.bridgeEIPAddrManager.deleteEgressIP(eip) + if err != nil { + return err + } + if isSyncRequired { + if err = g.Reconcile(); err != nil { + return fmt.Errorf("failed to sync gateway: %v", err) + } + g.openflowManager.requestFlowSync() + } + return nil +} + +func (g *gateway) SyncEgressIP(eips []interface{}) error { + if !util.IsNetworkSegmentationSupportEnabled() || !config.OVNKubernetesFeature.EnableInterconnect || config.Gateway.Mode == config.GatewayModeDisabled { + return nil + } + if err := g.bridgeEIPAddrManager.syncEgressIP(eips); err != nil { + return err + } + if err := g.Reconcile(); err != nil { + return fmt.Errorf("failed to sync gateway: %v", err) + } + g.openflowManager.requestFlowSync() + return nil } func (g *gateway) Init(stopChan <-chan struct{}, wg *sync.WaitGroup) error { @@ -220,31 +296,24 @@ func (g *gateway) Init(stopChan <-chan struct{}, wg *sync.WaitGroup) error { g.wg = wg var err error - if err = g.initFunc(); err != nil { - return err - } + g.servicesRetryFramework = g.newRetryFrameworkNode(factory.ServiceForGatewayType) if _, err = g.servicesRetryFramework.WatchResource(); err != nil { return fmt.Errorf("gateway init failed to start watching services: %v", err) } endpointSlicesRetryFramework := g.newRetryFrameworkNode(factory.EndpointSliceForGatewayType) - - if util.IsNetworkSegmentationSupportEnabled() { - // Filter out objects without the default serviceName label to exclude mirrored EndpointSlices - // Only default EndpointSlices contain the discovery.LabelServiceName label - req, err := labels.NewRequirement(discovery.LabelServiceName, selection.Exists, nil) - if err != nil { - return err - } - if _, err = endpointSlicesRetryFramework.WatchResourceFiltered("", labels.NewSelector().Add(*req)); err != nil { - return fmt.Errorf("gateway init failed to start watching endpointslices: %v", err) - } - return nil - } if _, err = endpointSlicesRetryFramework.WatchResource(); err != nil { return fmt.Errorf("gateway init failed to start watching endpointslices: %v", err) } + + if config.OVNKubernetesFeature.EnableEgressIP { + eipRetryFramework := g.newRetryFrameworkNode(factory.EgressIPType) + if _, err = eipRetryFramework.WatchResource(); err != nil { + return 
fmt.Errorf("gateway init failed to start watching EgressIPs: %v", err) + } + } + return nil } @@ -450,6 +519,7 @@ type bridgeConfiguration struct { ofPortPhys string ofPortHost string netConfig map[string]*bridgeUDNConfiguration + eipMarkIPs *markIPsCache } // updateInterfaceIPAddresses sets and returns the bridge's current ips @@ -491,6 +561,7 @@ func bridgeForInterface(intfName, nodeName, physicalNetworkName string, gwIPs [] netConfig: map[string]*bridgeUDNConfiguration{ types.DefaultNetworkName: defaultNetConfig, }, + eipMarkIPs: newMarkIPsCache(), } gwIntf := intfName diff --git a/go-controller/pkg/node/gateway_egressip.go b/go-controller/pkg/node/gateway_egressip.go new file mode 100644 index 0000000000..08c7d7f60c --- /dev/null +++ b/go-controller/pkg/node/gateway_egressip.go @@ -0,0 +1,541 @@ +package node + +import ( + "encoding/json" + "fmt" + "math" + "net" + "sync" + + egressipv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1" + egressipinformers "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/egressip/v1" + egressiplisters "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/listers/egressip/v1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/linkmanager" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + + "github.com/vishvananda/netlink" + "golang.org/x/sys/unix" + "k8s.io/apimachinery/pkg/util/sets" + corev1informers "k8s.io/client-go/informers/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" +) + +// markIPs contains packet mark and associated EgressIP IP for IPv4 / IPv6. 
Key is packet mark, value egress IP +type markIPs struct { + v4 map[int]string + v6 map[int]string +} + +func (e markIPs) insert(mark util.EgressIPMark, ip net.IP) { + if len(ip) == 0 || !mark.IsAvailable() || !mark.IsValid() { + klog.Errorf("Insertion of EgressIP config failed: invalid: IP %v, mark %v", ip, mark) + return + } + if ip.To4() != nil { + e.v4[mark.ToInt()] = ip.String() + } else if ip.To16() != nil { + e.v6[mark.ToInt()] = ip.String() + } +} + +func (e markIPs) delete(mark util.EgressIPMark, ip net.IP) { + if ip == nil || !mark.IsAvailable() || !mark.IsValid() { + klog.Errorf("Deletion of EgressIP config failed: invalid: IP %v, mark %v", ip, mark) + return + } + if ip.To4() != nil { + delete(e.v4, mark.ToInt()) + } else if ip.To16() != nil { + delete(e.v6, mark.ToInt()) + } +} + +func (e markIPs) containsIP(ip net.IP) bool { + if len(ip) == 0 { + klog.Errorf("Invalid IP argument therefore not checking EgressIP config cache: IP %v", ip) + return false + } + ipStr := ip.String() + var m map[int]string + if ip.To4() != nil { + m = e.v4 + } else if ip.To16() != nil { + m = e.v6 + } + for _, existingIP := range m { + if existingIP == ipStr { + return true + } + } + return false +} + +type markIPsCache struct { + mu sync.Mutex + hasSyncOnce bool + markToIPs markIPs + IPToMark map[string]int +} + +func newMarkIPsCache() *markIPsCache { + return &markIPsCache{ + mu: sync.Mutex{}, + markToIPs: markIPs{ + v4: make(map[int]string), + v6: make(map[int]string), + }, + IPToMark: map[string]int{}, + } +} + +func (mic *markIPsCache) IsIPPresent(ip net.IP) bool { + mic.mu.Lock() + defer mic.mu.Unlock() + if ip == nil { + return false + } + _, isFound := mic.IPToMark[ip.String()] + return isFound +} + +func (mic *markIPsCache) insertMarkIP(pktMark util.EgressIPMark, ip net.IP) { + mic.mu.Lock() + defer mic.mu.Unlock() + if ip == nil { + return + } + mic.markToIPs.insert(pktMark, ip) + mic.IPToMark[ip.String()] = pktMark.ToInt() +} + +func (mic *markIPsCache) deleteMarkIP(pktMark util.EgressIPMark, ip net.IP) { + mic.mu.Lock() + defer mic.mu.Unlock() + if ip == nil { + return + } + mic.markToIPs.delete(pktMark, ip) + delete(mic.IPToMark, ip.String()) +} + +func (mic *markIPsCache) replaceAll(markIPs markIPs) { + mic.mu.Lock() + mic.markToIPs = markIPs + for mark, ipv4 := range markIPs.v4 { + mic.IPToMark[ipv4] = mark + } + for mark, ipv6 := range markIPs.v6 { + mic.IPToMark[ipv6] = mark + } + mic.mu.Unlock() +} + +func (mic *markIPsCache) GetIPv4() map[int]string { + mic.mu.Lock() + defer mic.mu.Unlock() + dupe := make(map[int]string) + for key, value := range mic.markToIPs.v4 { + if value == "" { + continue + } + dupe[key] = value + } + return dupe +} + +func (mic *markIPsCache) GetIPv6() map[int]string { + mic.mu.Lock() + defer mic.mu.Unlock() + dupe := make(map[int]string) + for key, value := range mic.markToIPs.v6 { + if value == "" { + continue + } + dupe[key] = value + } + return dupe +} + +func (mic *markIPsCache) HasSyncdOnce() bool { + mic.mu.Lock() + defer mic.mu.Unlock() + return mic.hasSyncOnce +} + +func (mic *markIPsCache) setSyncdOnce() { + mic.mu.Lock() + mic.hasSyncOnce = true + mic.mu.Unlock() +} + +type bridgeEIPAddrManager struct { + nodeName string + bridgeName string + nodeAnnotationMu sync.Mutex + eIPLister egressiplisters.EgressIPLister + eIPInformer cache.SharedIndexInformer + nodeLister corev1listers.NodeLister + kube kube.Interface + addrManager *linkmanager.Controller + cache *markIPsCache +} + +// newBridgeEIPAddrManager manages EgressIP IPs that must be added to 
ovs bridges to support the EgressIP feature for user +// defined networks. It saves the assigned IPs to its respective Node annotation in order to understand which IPs it assigned +// prior to restarting. +// It provides the assigned IPs info to the node IP handler. The node IP handler must not consider assigned EgressIP IPs as possible node IPs. +// Openflow manager must generate the SNAT openflow conditional on packet marks and therefore needs access to EIP IPs and associated packet marks. +// bridgeEIPAddrManager must be able to force Openflow manager to resync if EgressIP assignment for the node changes. +func newBridgeEIPAddrManager(nodeName, bridgeName string, linkManager *linkmanager.Controller, + kube kube.Interface, eIPInformer egressipinformers.EgressIPInformer, nodeInformer corev1informers.NodeInformer) *bridgeEIPAddrManager { + return &bridgeEIPAddrManager{ + nodeName: nodeName, // k8s node name + bridgeName: bridgeName, // bridge name for which EIP IPs are managed + nodeAnnotationMu: sync.Mutex{}, // mu for updating Node annotation + eIPLister: eIPInformer.Lister(), + eIPInformer: eIPInformer.Informer(), + nodeLister: nodeInformer.Lister(), + kube: kube, + addrManager: linkManager, + cache: newMarkIPsCache(), // cache to store pkt mark -> EIP IP. + } +} + +func (g *bridgeEIPAddrManager) GetCache() *markIPsCache { + return g.cache +} + +func (g *bridgeEIPAddrManager) addEgressIP(eip *egressipv1.EgressIP) (bool, error) { + var isUpdated bool + if !util.IsEgressIPMarkSet(eip.Annotations) { + return isUpdated, nil + } + for _, status := range eip.Status.Items { + if status.Node != g.nodeName { + continue + } + ip, pktMark, err := parseEIPMarkIP(eip.Annotations, status.EgressIP) + if err != nil { + return isUpdated, fmt.Errorf("failed to add EgressIP gateway config because unable to extract config from EgressIP obj: %v", err) + } + // must always add to cache before adding IP because we want to inform node ip handler that this is not a valid node IP + g.cache.insertMarkIP(pktMark, ip) + if err = g.addIPToAnnotation(ip); err != nil { + return isUpdated, fmt.Errorf("failed to add EgressIP gateway config because unable to add EgressIP IP to Node annotation: %v", err) + } + if err = g.addIPBridge(ip); err != nil { + return isUpdated, fmt.Errorf("failed to add EgressIP gateway config because failed to add address to link: %v", err) + } + isUpdated = true + break // no need to continue as only one EIP IP is assigned to a node + } + return isUpdated, nil +} + +func (g *bridgeEIPAddrManager) updateEgressIP(oldEIP, newEIP *egressipv1.EgressIP) (bool, error) { + var isUpdated bool + // at most, one status item for this node will be found. + for _, oldStatus := range oldEIP.Status.Items { + if oldStatus.Node != g.nodeName { + continue + } + if !util.IsEgressIPMarkSet(oldEIP.Annotations) { + // this scenario may occur during upgrade from a version when ovn-k didn't apply marks to EIP objs + break + } + if util.IsItemInSlice(newEIP.Status.Items, oldStatus) { + // if the status entry exists in both the old and new status items, then nothing needs to be done because there was no status update. + // also, because at most only one status item can be assigned to a node, we can return early.
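+ // For example (illustrative values, not from the original change): if oldEIP and newEIP both carry the status item {Node: "node1", EgressIP: "10.0.0.10"}, this node's assignment is unchanged, so neither the bridge address nor the Node annotation needs to be touched.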
+ return isUpdated, nil + } + ip, pktMark, err := parseEIPMarkIP(oldEIP.Annotations, oldStatus.EgressIP) + if err != nil { + return isUpdated, fmt.Errorf("failed to update EgressIP SNAT for ext bridge cache because unable to extract config from old EgressIP obj: %v", err) + } + if err = g.deleteIPBridge(ip); err != nil { + return isUpdated, fmt.Errorf("failed to update EgressIP gateway config because failed to delete address from link: %v", err) + } + g.cache.deleteMarkIP(pktMark, ip) + if err = g.deleteIPsFromAnnotation(ip); err != nil { + return isUpdated, fmt.Errorf("failed to update EgressIP gateway config because unable to delete EgressIP IP from Node annotation: %v", err) + } + isUpdated = true + break + } + for _, newStatus := range newEIP.Status.Items { + if newStatus.Node != g.nodeName { + continue + } + if !util.IsEgressIPMarkSet(newEIP.Annotations) { + // this scenario may occur during upgrade from when ovn-k didn't apply marks to EIP objs + return isUpdated, nil + } + ip, pktMark, err := parseEIPMarkIP(newEIP.Annotations, newStatus.EgressIP) + if err != nil { + return isUpdated, fmt.Errorf("failed to update EgressIP gateway config because unable to extract config from EgressIP obj: %v", err) + } + // must always add to OF cache before adding IP because we want to inform node ip handler that this is not a valid node IP + g.cache.insertMarkIP(pktMark, ip) + if err = g.addIPToAnnotation(ip); err != nil { + return isUpdated, fmt.Errorf("failed to update EgressIP gateway config because unable to add EgressIP IP to Node annotation: %v", err) + } + if err = g.addIPBridge(ip); err != nil { + return isUpdated, fmt.Errorf("failed to update EgressIP gateway config because failed to add address to link: %v", err) + } + isUpdated = true + break + } + return isUpdated, nil +} + +func (g *bridgeEIPAddrManager) deleteEgressIP(eip *egressipv1.EgressIP) (bool, error) { + var isUpdated bool + if !util.IsEgressIPMarkSet(eip.Annotations) { + return isUpdated, nil + } + for _, status := range eip.Status.Items { + if status.Node != g.nodeName { + continue + } + if !util.IsEgressIPMarkSet(eip.Annotations) { + continue + } + ip, pktMark, err := parseEIPMarkIP(eip.Annotations, status.EgressIP) + if err != nil { + return isUpdated, fmt.Errorf("failed to delete EgressIP gateway config because unable to extract config from EgressIP obj: %v", err) + } + if err = g.deleteIPBridge(ip); err != nil { + return isUpdated, fmt.Errorf("failed to delete EgressIP gateway config because failed to delete address from link: %v", err) + } + g.cache.deleteMarkIP(pktMark, ip) + if err = g.deleteIPsFromAnnotation(ip); err != nil { + return isUpdated, fmt.Errorf("failed to delete EgressIP gateway config because failed to delete EgressIP IP from Node annotation: %v", err) + } + isUpdated = true + break // no need to continue as only one EIP IP is assigned per node + } + return isUpdated, nil +} + +func (g *bridgeEIPAddrManager) syncEgressIP(objs []interface{}) error { + // caller must synchronise + annotIPs, err := g.getAnnotationIPs() + if err != nil { + return fmt.Errorf("failed to sync EgressIP gateway config because unable to get Node annotation: %v", err) + } + configs := markIPs{v4: map[int]string{}, v6: map[int]string{}} + for _, obj := range objs { + eip, ok := obj.(*egressipv1.EgressIP) + if !ok { + return fmt.Errorf("expected EgressIP type but received %T", obj) + } + // This may happen during upgrade when node controllers upgrade before cluster manager upgrades when cluster manager doesn't contain func + // to add 
a pkt mark to EgressIPs. + if !util.IsEgressIPMarkSet(eip.Annotations) { + continue + } + for _, status := range eip.Status.Items { + if status.Node != g.nodeName { + continue + } + if ip, pktMark, err := parseEIPMarkIP(eip.Annotations, status.EgressIP); err != nil { + klog.Errorf("Failed to sync EgressIP %s gateway config because unable to extract config from EIP obj: %v", eip.Name, err) + } else { + configs.insert(pktMark, ip) + if err = g.addIPToAnnotation(ip); err != nil { + return fmt.Errorf("failed to sync EgressIP gateway config because unable to add EgressIP IP to Node annotation: %v", err) + } + if err = g.addIPBridge(ip); err != nil { + return fmt.Errorf("failed to sync EgressIP gateway config because failed to add address to link: %v", err) + } + } + break + } + } + ipsToDel := make([]net.IP, 0) + for _, annotIP := range annotIPs { + if configs.containsIP(annotIP) { + continue + } + if err = g.deleteIPBridge(annotIP); err != nil { + klog.Errorf("Failed to delete stale EgressIP IP %s from gateway: %v", annotIP, err) + continue + } + ipsToDel = append(ipsToDel, annotIP) + } + if len(ipsToDel) > 0 { + if err = g.deleteIPsFromAnnotation(ipsToDel...); err != nil { + return fmt.Errorf("failed to delete EgressIP IPs from Node annotation: %v", err) + } + } + g.cache.replaceAll(configs) + g.cache.setSyncdOnce() + return nil +} + +// addIPToAnnotation adds an address to the collection of existing addresses stored in the nodes annotation. Caller +// may repeat addition of addresses without care for duplicate addresses being added. +func (g *bridgeEIPAddrManager) addIPToAnnotation(candidateIP net.IP) error { + g.nodeAnnotationMu.Lock() + defer g.nodeAnnotationMu.Unlock() + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + node, err := g.nodeLister.Get(g.nodeName) + if err != nil { + return err + } + existingIPsStr, err := util.ParseNodeBridgeEgressIPsAnnotation(node) + if err != nil { + if util.IsAnnotationNotSetError(err) { + existingIPsStr = make([]string, 0) + } else { + return fmt.Errorf("failed to parse annotation key %q from node object: %v", util.OVNNodeBridgeEgressIPs, err) + } + } + existingIPsSet := sets.New[string](existingIPsStr...) + candidateIPStr := candidateIP.String() + if existingIPsSet.Has(candidateIPStr) { + return nil + } + patch, err := json.Marshal(existingIPsSet.Insert(candidateIPStr).UnsortedList()) + if err != nil { + return err + } + node.Annotations[util.OVNNodeBridgeEgressIPs] = string(patch) + return g.kube.UpdateNodeStatus(node) + }) +} + +// deleteIPsFromAnnotation deletes address from annotation. If multiple users, callers must synchronise. +// deletion of address that doesn't exist will not cause an error. +func (g *bridgeEIPAddrManager) deleteIPsFromAnnotation(candidateIPs ...net.IP) error { + g.nodeAnnotationMu.Lock() + defer g.nodeAnnotationMu.Unlock() + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + node, err := g.nodeLister.Get(g.nodeName) + if err != nil { + return err + } + existingIPsStr, err := util.ParseNodeBridgeEgressIPsAnnotation(node) + if err != nil { + if util.IsAnnotationNotSetError(err) { + existingIPsStr = make([]string, 0) + } else { + return fmt.Errorf("failed to parse annotation key %q from node object: %v", util.OVNNodeBridgeEgressIPs, err) + } + } + if len(existingIPsStr) == 0 { + return nil + } + existingIPsSet := sets.New[string](existingIPsStr...) + candidateIPsStr := getIPsStr(candidateIPs...) + if !existingIPsSet.HasAny(candidateIPsStr...) 
{ + return nil + } + existingIPsSet.Delete(candidateIPsStr...) + patch, err := json.Marshal(existingIPsSet.UnsortedList()) + if err != nil { + return err + } + node.Annotations[util.OVNNodeBridgeEgressIPs] = string(patch) + return g.kube.UpdateNodeStatus(node) + }) +} + +func (g *bridgeEIPAddrManager) addIPBridge(ip net.IP) error { + link, err := util.GetNetLinkOps().LinkByName(g.bridgeName) + if err != nil { + return fmt.Errorf("failed to get link obj by name %s: %v", g.bridgeName, err) + } + return g.addrManager.AddAddress(getEIPBridgeNetlinkAddress(ip, link.Attrs().Index)) +} + +func (g *bridgeEIPAddrManager) deleteIPBridge(ip net.IP) error { + link, err := util.GetNetLinkOps().LinkByName(g.bridgeName) + if err != nil { + return fmt.Errorf("failed to get link obj by name %s: %v", g.bridgeName, err) + } + return g.addrManager.DelAddress(getEIPBridgeNetlinkAddress(ip, link.Attrs().Index)) +} + +// getAnnotationIPs retrieves the egress IP annotation from the current node's Node object. If there are multiple users, callers must synchronise. +// If the annotation isn't present, an empty set is returned. +func (g *bridgeEIPAddrManager) getAnnotationIPs() ([]net.IP, error) { + node, err := g.nodeLister.Get(g.nodeName) + if err != nil { + return nil, fmt.Errorf("failed to get node %s from lister: %v", g.nodeName, err) + } + ipsStr, err := util.ParseNodeBridgeEgressIPsAnnotation(node) + if err != nil { + if util.IsAnnotationNotSetError(err) { + ipsStr = make([]string, 0) + } else { + return nil, err + } + } + ips := make([]net.IP, 0, len(ipsStr)) + for _, ipStr := range ipsStr { + ip := net.ParseIP(ipStr) + if ip == nil { + return nil, fmt.Errorf("failed to parse IPs from Node annotation %s: %v", util.OVNNodeBridgeEgressIPs, ipsStr) + } + ips = append(ips, ip) + } + return ips, nil +} + +func parseEIPMarkIP(annotations map[string]string, eip string) (net.IP, util.EgressIPMark, error) { + pktMark, err := util.ParseEgressIPMark(annotations) + if err != nil { + return nil, pktMark, fmt.Errorf("failed to extract packet mark from EgressIP annotations: %v", err) + } + // status update and pkt mark should be configured as one operation by cluster manager + if !pktMark.IsAvailable() { + return nil, pktMark, fmt.Errorf("packet mark is not set") + } + if !pktMark.IsValid() { + return nil, pktMark, fmt.Errorf("packet mark is not valid") + } + ip := net.ParseIP(eip) + if ip == nil { + return nil, pktMark, fmt.Errorf("invalid IP") + } + return ip, pktMark, nil +} + +func getIPsStr(ips ...net.IP) []string { + ipsStr := make([]string, 0, len(ips)) + for _, ip := range ips { + ipsStr = append(ipsStr, ip.String()) + } + return ipsStr +} + +func getEIPBridgeNetlinkAddress(ip net.IP, ifindex int) netlink.Addr { + return netlink.Addr{ + IPNet: &net.IPNet{IP: ip, Mask: util.GetIPFullMask(ip)}, + Flags: getEIPNetlinkAddressFlag(ip), + Scope: int(netlink.SCOPE_LINK), + ValidLft: getEIPNetlinkAddressValidLft(ip), + LinkIndex: ifindex, + } +} + +func getEIPNetlinkAddressFlag(ip net.IP) int { + // IPv6 addresses get IFA_F_NODAD so the kernel skips duplicate address detection + if ip.To4() == nil && ip.To16() != nil { + return unix.IFA_F_NODAD + } + return 0 +} + +func getEIPNetlinkAddressValidLft(ip net.IP) int { + // isV6?
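+ // Assumed netlink semantics, not stated in this diff: a valid lifetime of math.MaxUint32 marks the address as valid forever, while the 0 returned for IPv4 below leaves the kernel's default permanent-address behaviour.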
+ if ip.To4() == nil && ip.To16() != nil { + return math.MaxUint32 + } + return 0 +} diff --git a/go-controller/pkg/node/gateway_egressip_test.go b/go-controller/pkg/node/gateway_egressip_test.go new file mode 100644 index 0000000000..c0c292b724 --- /dev/null +++ b/go-controller/pkg/node/gateway_egressip_test.go @@ -0,0 +1,404 @@ +package node + +import ( + "fmt" + "net" + "strings" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + egressipv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/linkmanager" + netlink_mocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/mocks/github.com/vishvananda/netlink" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/mocks" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "github.com/vishvananda/netlink" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" +) + +var _ = ginkgo.Describe("Gateway EgressIP", func() { + + const ( + nodeName = "ovn-worker" + bridgeName = "breth0" + bridgeLinkIndex = 10 + ipV4Addr = "192.168.1.5" + ipV4Addr2 = "192.168.1.6" + ipV4Addr3 = "192.168.1.7" + mark = "50000" + mark2 = "50001" + mark3 = "50002" + emptyAnnotation = "" + ) + + var ( + nlMock *mocks.NetLinkOps + nlLinkMock *netlink_mocks.Link + ) + + ginkgo.BeforeEach(func() { + // Restore global default values before each testcase + err := config.PrepareTestConfig() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + nlMock = new(mocks.NetLinkOps) + nlLinkMock = new(netlink_mocks.Link) + util.SetNetLinkOpMockInst(nlMock) + }) + + ginkgo.AfterEach(func() { + util.ResetNetLinkOpMockInst() + }) + + ginkgo.Context("add EgressIP", func() { + ginkgo.It("configures annotation and bridge when EIP assigned to node", func() { + nlLinkMock.On("Attrs").Return(&netlink.LinkAttrs{Name: bridgeName, Index: bridgeLinkIndex}, nil) + nlMock.On("LinkByName", bridgeName).Return(nlLinkMock, nil) + nlMock.On("LinkByIndex", bridgeLinkIndex).Return(nlLinkMock, nil) + nlMock.On("LinkList").Return([]netlink.Link{nlLinkMock}, nil) + nlMock.On("AddrList", nlLinkMock, 0).Return([]netlink.Addr{}, nil) + nlMock.On("AddrAdd", nlLinkMock, getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex)).Return(nil) + addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) + defer stopFn() + eip := getEIPAssignedToNode(nodeName, mark, ipV4Addr) + isUpdated, err := addrMgr.addEgressIP(eip) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") + gomega.Expect(isUpdated).Should(gomega.BeTrue()) + node, err := addrMgr.kube.GetNode(nodeName) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") + gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr)) + gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, + getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex))).Should(gomega.BeTrue()) + }) + + ginkgo.It("doesn't configure or fail when annotation mark isn't found", func() { + nlMock.On("AddrAdd", nlLinkMock, getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex)).Return(nil) + addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, 
emptyAnnotation) + defer stopFn() + eip := getEIPAssignedToNode(nodeName, "", ipV4Addr) + isUpdated, err := addrMgr.addEgressIP(eip) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") + gomega.Expect(isUpdated).Should(gomega.BeFalse()) + node, err := addrMgr.kube.GetNode(nodeName) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") + gomega.Expect(parseEIPsFromAnnotation(node)).ShouldNot(gomega.ConsistOf(ipV4Addr)) + gomega.Expect(nlMock.AssertNotCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, + getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex))).Should(gomega.BeTrue()) + }) + + ginkgo.It("fails when invalid annotation mark", func() { + nlMock.On("AddrAdd", nlLinkMock, getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex)).Return(nil) + addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) + defer stopFn() + eip := getEIPAssignedToNode(nodeName, "not-an-integer", ipV4Addr) + isUpdated, err := addrMgr.addEgressIP(eip) + gomega.Expect(err).Should(gomega.HaveOccurred(), "should fail to process an EgressIP with an invalid mark") + gomega.Expect(isUpdated).Should(gomega.BeFalse()) + node, err := addrMgr.kube.GetNode(nodeName) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") + gomega.Expect(parseEIPsFromAnnotation(node)).ShouldNot(gomega.ConsistOf(ipV4Addr)) + gomega.Expect(nlMock.AssertNotCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, + getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex))).Should(gomega.BeTrue()) + }) + + ginkgo.It("configures annotations with existing entries", func() { + nlLinkMock.On("Attrs").Return(&netlink.LinkAttrs{Name: bridgeName, Index: bridgeLinkIndex}, nil) + nlMock.On("LinkByName", bridgeName).Return(nlLinkMock, nil) + nlMock.On("LinkByIndex", bridgeLinkIndex).Return(nlLinkMock, nil) + nlMock.On("LinkList").Return([]netlink.Link{nlLinkMock}, nil) + nlMock.On("AddrList", nlLinkMock, 0).Return([]netlink.Addr{}, nil) + nlMock.On("AddrAdd", nlLinkMock, getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex)).Return(nil) + addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, generateAnnotFromIPs(ipV4Addr2)) + defer stopFn() + eip := getEIPAssignedToNode(nodeName, mark, ipV4Addr) + isUpdated, err := addrMgr.addEgressIP(eip) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") + gomega.Expect(isUpdated).Should(gomega.BeTrue()) + node, err := addrMgr.kube.GetNode(nodeName) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") + gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr, ipV4Addr2)) + gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, + getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex))).Should(gomega.BeTrue()) + }) + }) + + ginkgo.Context("update EgressIP", func() { + ginkgo.It("configures when EgressIP is not assigned to the node", func() { + nlLinkMock.On("Attrs").Return(&netlink.LinkAttrs{Name: bridgeName, Index: bridgeLinkIndex}, nil) + nlMock.On("LinkByName", bridgeName).Return(nlLinkMock, nil) + nlMock.On("LinkByIndex", bridgeLinkIndex).Return(nlLinkMock, nil) + nlMock.On("LinkList").Return([]netlink.Link{nlLinkMock}, nil) + nlMock.On("AddrList", nlLinkMock, 0).Return([]netlink.Addr{}, nil) + nlMock.On("AddrAdd", nlLinkMock, getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex)).Return(nil) + 
addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) + defer stopFn() + assignedEIP := getEIPAssignedToNode(nodeName, mark, ipV4Addr) + unassignedEIP := getEIPNotAssignedToNode(mark, ipV4Addr) + isUpdated, err := addrMgr.updateEgressIP(unassignedEIP, assignedEIP) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") + gomega.Expect(isUpdated).Should(gomega.BeTrue()) + node, err := addrMgr.kube.GetNode(nodeName) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") + gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr)) + gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, + getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex))).Should(gomega.BeTrue()) + }) + + ginkgo.It("removes EgressIP previously assigned", func() { + nlLinkMock.On("Attrs").Return(&netlink.LinkAttrs{Name: bridgeName, Index: bridgeLinkIndex}, nil) + nlMock.On("LinkByName", bridgeName).Return(nlLinkMock, nil) + nlMock.On("LinkByIndex", bridgeLinkIndex).Return(nlLinkMock, nil) + nlMock.On("LinkList").Return([]netlink.Link{nlLinkMock}, nil) + nlMock.On("AddrList", nlLinkMock, 0).Return([]netlink.Addr{}, nil) + nlMock.On("AddrAdd", nlLinkMock, getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex)).Return(nil) + nlMock.On("AddrDel", nlLinkMock, getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex)).Return(nil) + addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) + defer stopFn() + assignedEIP := getEIPAssignedToNode(nodeName, mark, ipV4Addr) + unassignedEIP := getEIPNotAssignedToNode(mark, ipV4Addr) + isUpdated, err := addrMgr.updateEgressIP(unassignedEIP, assignedEIP) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") + gomega.Expect(isUpdated).Should(gomega.BeTrue()) + isUpdated, err = addrMgr.updateEgressIP(assignedEIP, unassignedEIP) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") + gomega.Expect(isUpdated).Should(gomega.BeTrue()) + node, err := addrMgr.kube.GetNode(nodeName) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") + gomega.Expect(parseEIPsFromAnnotation(node)).ShouldNot(gomega.ConsistOf(ipV4Addr)) + gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, + getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex))).Should(gomega.BeTrue()) + gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrDel", nlLinkMock, + getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex))).Should(gomega.BeTrue()) + }) + + ginkgo.It("reconfigures from an old to a new IP", func() { + nlLinkMock.On("Attrs").Return(&netlink.LinkAttrs{Name: bridgeName, Index: bridgeLinkIndex}, nil) + nlMock.On("LinkByName", bridgeName).Return(nlLinkMock, nil) + nlMock.On("LinkByIndex", bridgeLinkIndex).Return(nlLinkMock, nil) + nlMock.On("LinkList").Return([]netlink.Link{nlLinkMock}, nil) + nlMock.On("AddrList", nlLinkMock, 0).Return([]netlink.Addr{}, nil) + nlMock.On("AddrAdd", nlLinkMock, getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex)).Return(nil) + nlMock.On("AddrAdd", nlLinkMock, getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr2), bridgeLinkIndex)).Return(nil) + nlMock.On("AddrDel", nlLinkMock, getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex)).Return(nil) + addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, 
bridgeName, emptyAnnotation) + defer stopFn() + unassignedEIP := getEIPNotAssignedToNode(mark, ipV4Addr) + assignedEIP1 := getEIPAssignedToNode(nodeName, mark, ipV4Addr) + assignedEIP2 := getEIPAssignedToNode(nodeName, mark2, ipV4Addr2) + isUpdated, err := addrMgr.updateEgressIP(unassignedEIP, assignedEIP1) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") + gomega.Expect(isUpdated).Should(gomega.BeTrue()) + isUpdated, err = addrMgr.updateEgressIP(assignedEIP1, assignedEIP2) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") + gomega.Expect(isUpdated).Should(gomega.BeTrue()) + node, err := addrMgr.kube.GetNode(nodeName) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") + gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr2)) + gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, + getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex))).Should(gomega.BeTrue()) + gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, + getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr2), bridgeLinkIndex))).Should(gomega.BeTrue()) + gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrDel", nlLinkMock, + getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex))).Should(gomega.BeTrue()) + }) + }) + + ginkgo.Context("delete EgressIP", func() { + ginkgo.It("removes configuration from annotation and bridge when EIP assigned to node is deleted", func() { + nlLinkMock.On("Attrs").Return(&netlink.LinkAttrs{Name: bridgeName, Index: bridgeLinkIndex}, nil) + nlMock.On("LinkByName", bridgeName).Return(nlLinkMock, nil) + nlMock.On("LinkByIndex", bridgeLinkIndex).Return(nlLinkMock, nil) + nlMock.On("LinkList").Return([]netlink.Link{nlLinkMock}, nil) + nlMock.On("AddrList", nlLinkMock, 0).Return([]netlink.Addr{}, nil) + nlMock.On("AddrAdd", nlLinkMock, getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex)).Return(nil) + nlMock.On("AddrDel", nlLinkMock, getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex)).Return(nil) + addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) + defer stopFn() + eip := getEIPAssignedToNode(nodeName, mark, ipV4Addr) + isUpdated, err := addrMgr.addEgressIP(eip) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") + gomega.Expect(isUpdated).Should(gomega.BeTrue()) + isUpdated, err = addrMgr.deleteEgressIP(eip) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") + gomega.Expect(isUpdated).Should(gomega.BeTrue()) + node, err := addrMgr.kube.GetNode(nodeName) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") + gomega.Expect(parseEIPsFromAnnotation(node)).ShouldNot(gomega.ConsistOf(ipV4Addr)) + gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, + getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex))).Should(gomega.BeTrue()) + gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrDel", nlLinkMock, + getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex))).Should(gomega.BeTrue()) + }) + + ginkgo.It("does not update when EIP is deleted that wasn't assigned to the node", func() { + addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, generateAnnotFromIPs(ipV4Addr2)) + defer stopFn() + eip := getEIPNotAssignedToNode(mark, ipV4Addr) + isUpdated, 
err := addrMgr.deleteEgressIP(eip) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") + gomega.Expect(isUpdated).Should(gomega.BeFalse()) + node, err := addrMgr.kube.GetNode(nodeName) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") + gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr2)) + gomega.Expect(nlMock.AssertNotCalled(ginkgo.GinkgoT(), "AddrDel", nlLinkMock, + getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex))).Should(gomega.BeTrue()) + }) + }) + + ginkgo.Context("sync EgressIP", func() { + ginkgo.It("configures multiple EgressIPs assigned to the node", func() { + nlLinkMock.On("Attrs").Return(&netlink.LinkAttrs{Name: bridgeName, Index: bridgeLinkIndex}, nil) + nlMock.On("LinkByName", bridgeName).Return(nlLinkMock, nil) + nlMock.On("LinkByIndex", bridgeLinkIndex).Return(nlLinkMock, nil) + nlMock.On("LinkList").Return([]netlink.Link{nlLinkMock}, nil) + nlMock.On("AddrList", nlLinkMock, 0).Return([]netlink.Addr{}, nil) + nlMock.On("AddrAdd", nlLinkMock, getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex)).Return(nil) + nlMock.On("AddrAdd", nlLinkMock, getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr2), bridgeLinkIndex)).Return(nil) + addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) + defer stopFn() + eipAssigned1 := getEIPAssignedToNode(nodeName, mark, ipV4Addr) + eipAssigned2 := getEIPAssignedToNode(nodeName, mark2, ipV4Addr2) + eipUnassigned3 := getEIPNotAssignedToNode(mark3, ipV4Addr3) + err := addrMgr.syncEgressIP([]interface{}{eipAssigned1, eipAssigned2, eipUnassigned3}) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process valid EgressIPs") + node, err := addrMgr.kube.GetNode(nodeName) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") + gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr, ipV4Addr2)) + gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, + getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex))).Should(gomega.BeTrue()) + gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, + getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr2), bridgeLinkIndex))).Should(gomega.BeTrue()) + }) + + ginkgo.It("delete previous configuration", func() { + nlLinkMock.On("Attrs").Return(&netlink.LinkAttrs{Name: bridgeName, Index: bridgeLinkIndex}, nil) + nlMock.On("LinkByName", bridgeName).Return(nlLinkMock, nil) + nlMock.On("LinkByIndex", bridgeLinkIndex).Return(nlLinkMock, nil) + nlMock.On("LinkList").Return([]netlink.Link{nlLinkMock}, nil) + nlMock.On("AddrList", nlLinkMock, 0).Return([]netlink.Addr{}, nil) + nlMock.On("AddrAdd", nlLinkMock, getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex)).Return(nil) + nlMock.On("AddrAdd", nlLinkMock, getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr2), bridgeLinkIndex)).Return(nil) + nlMock.On("AddrDel", nlLinkMock, getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr3), bridgeLinkIndex)).Return(nil) + addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, generateAnnotFromIPs(ipV4Addr3)) // previously configured IP + defer stopFn() + eipAssigned1 := getEIPAssignedToNode(nodeName, mark, ipV4Addr) + eipAssigned2 := getEIPAssignedToNode(nodeName, mark2, ipV4Addr2) + err := addrMgr.syncEgressIP([]interface{}{eipAssigned1, eipAssigned2}) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should 
process valid EgressIPs") + node, err := addrMgr.kube.GetNode(nodeName) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") + gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr, ipV4Addr2)) + gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, + getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr), bridgeLinkIndex))).Should(gomega.BeTrue()) + gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, + getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr2), bridgeLinkIndex))).Should(gomega.BeTrue()) + gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrDel", nlLinkMock, + getEIPBridgeNetlinkAddressPtr(net.ParseIP(ipV4Addr3), bridgeLinkIndex))).Should(gomega.BeTrue()) + }) + + ginkgo.It("no update or failure when mark is not set", func() { + addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) + defer stopFn() + eipAssigned := getEIPAssignedToNode(nodeName, "", ipV4Addr) + err := addrMgr.syncEgressIP([]interface{}{eipAssigned}) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process valid EgressIPs") + node, err := addrMgr.kube.GetNode(nodeName) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") + gomega.Expect(len(parseEIPsFromAnnotation(node))).Should(gomega.BeZero()) + }) + }) +}) + +func initBridgeEIPAddrManager(nodeName, bridgeName string, bridgeEIPAnnot string) (*bridgeEIPAddrManager, func()) { + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName, Annotations: map[string]string{}}, + } + if bridgeEIPAnnot != "" { + node.Annotations[util.OVNNodeBridgeEgressIPs] = bridgeEIPAnnot + } + client := fake.NewSimpleClientset(node) + watchFactory, err := factory.NewNodeWatchFactory(&util.OVNNodeClientset{KubeClient: client}, nodeName) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "watch factory creation must succeed") + gomega.Expect(watchFactory.Start()).Should(gomega.Succeed(), "watch factory should start") + linkManager := linkmanager.NewController(nodeName, true, true, nil) + return newBridgeEIPAddrManager(nodeName, bridgeName, linkManager, &kube.Kube{KClient: client}, watchFactory.EgressIPInformer(), watchFactory.NodeCoreInformer()), + watchFactory.Shutdown +} + +func getEIPAssignedToNode(nodeName, mark, assignedIP string) *egressipv1.EgressIP { + eip := &egressipv1.EgressIP{ + ObjectMeta: metav1.ObjectMeta{Name: "bridge-addr-mgr-test", Annotations: map[string]string{}}, + Spec: egressipv1.EgressIPSpec{ + EgressIPs: []string{assignedIP}, + }, + Status: egressipv1.EgressIPStatus{ + Items: []egressipv1.EgressIPStatusItem{ + { + Node: nodeName, + EgressIP: assignedIP, + }, + }, + }, + } + if mark != "" { + eip.Annotations[util.EgressIPMarkAnnotation] = mark + } + return eip +} + +func getEIPNotAssignedToNode(mark, ip string) *egressipv1.EgressIP { + eip := &egressipv1.EgressIP{ + ObjectMeta: metav1.ObjectMeta{Name: "bridge-addr-mgr-test", Annotations: map[string]string{}}, + Spec: egressipv1.EgressIPSpec{ + EgressIPs: []string{ip}, + }, + Status: egressipv1.EgressIPStatus{ + Items: []egressipv1.EgressIPStatusItem{ + { + Node: "different-node", + EgressIP: ip, + }, + }, + }, + } + if mark != "" { + eip.Annotations[util.EgressIPMarkAnnotation] = mark + } + return eip +} + +func generateAnnotFromIPs(ips ...string) string { + ipsWithQuotes := make([]string, 0, len(ips)) + for _, ip := range ips { + if ip == "" { + continue + } + if net.ParseIP(ip) == nil 
{ + panic("invalid IP") + } + ipsWithQuotes = append(ipsWithQuotes, fmt.Sprintf("\"%s\"", ip)) + } + return fmt.Sprintf("[%s]", strings.Join(ipsWithQuotes, ",")) +} + +func getEIPBridgeNetlinkAddressPtr(ip net.IP, ifindex int) *netlink.Addr { + addr := getEIPBridgeNetlinkAddress(ip, ifindex) + return &addr +} + +func parseEIPsFromAnnotation(node *corev1.Node) []string { + ips, err := util.ParseNodeBridgeEgressIPsAnnotation(node) + if err != nil { + if util.IsAnnotationNotSetError(err) { + ips = make([]string, 0) + } else { + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should be able to detect if annotation is or not") + } + } + return ips +} diff --git a/go-controller/pkg/node/gateway_init.go b/go-controller/pkg/node/gateway_init.go index 595160c462..80cb08ecfa 100644 --- a/go-controller/pkg/node/gateway_init.go +++ b/go-controller/pkg/node/gateway_init.go @@ -5,6 +5,7 @@ import ( "fmt" "net" "strings" + "time" "github.com/vishvananda/netlink" "k8s.io/klog/v2" @@ -13,9 +14,10 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" - util "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) // bridgedGatewayNodeSetup enables forwarding on bridge interface, sets up the physical network name mappings for the bridge, @@ -311,26 +313,23 @@ func configureSvcRouteViaInterface(routeManager *routemanager.Controller, iface return nil } -func (nc *DefaultNodeNetworkController) initGateway(subnets []*net.IPNet, nodeAnnotator kube.Annotator, - waiter *startupWaiter, managementPortConfig *managementPortConfig, kubeNodeIP net.IP) error { - klog.Info("Initializing Gateway Functionality") +// initGatewayPreStart executes the first part of the gateway initialization for the node. +// It creates the gateway object, the node IP manager, openflow manager and node port watcher +// once OVN controller is ready and the patch port exists for this node. +// It is split from initGatewayMainStart to allow for the gateway object and openflow manager to be created +// before the rest of the gateway functionality is started. 
+func (nc *DefaultNodeNetworkController) initGatewayPreStart(subnets []*net.IPNet, nodeAnnotator kube.Annotator,
+	managementPortConfig *managementPortConfig, kubeNodeIP net.IP) (*gateway, error) {
+
+	klog.Info("Initializing Gateway Functionality for Gateway PreStart")
 	var err error
 	var ifAddrs []*net.IPNet
-	var loadBalancerHealthChecker *loadBalancerHealthChecker
-	var portClaimWatcher *portClaimWatcher
-
-	if config.Gateway.NodeportEnable && config.OvnKubeNode.Mode == types.NodeModeFull {
-		loadBalancerHealthChecker = newLoadBalancerHealthChecker(nc.name, nc.watchFactory)
-		portClaimWatcher, err = newPortClaimWatcher(nc.recorder)
-		if err != nil {
-			return err
-		}
-	}
+	waiter := newStartupWaiter()
 
 	gatewayNextHops, gatewayIntf, err := getGatewayNextHops()
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	egressGWInterface := ""
@@ -340,7 +339,7 @@ func (nc *DefaultNodeNetworkController) initGateway(subnets []*net.IPNet, nodeAn
 
 	ifAddrs, err = getNetworkInterfaceIPAddresses(gatewayIntf)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	// For DPU need to use the host IP addr which currently is assumed to be K8s Node cluster
@@ -348,7 +347,7 @@ func (nc *DefaultNodeNetworkController) initGateway(subnets []*net.IPNet, nodeAn
 	if config.OvnKubeNode.Mode == types.NodeModeDPU {
 		ifAddrs, err = getDPUHostPrimaryIPAddresses(kubeNodeIP, ifAddrs)
 		if err != nil {
-			return err
+			return nil, err
 		}
 	}
 
@@ -358,14 +357,10 @@ func (nc *DefaultNodeNetworkController) initGateway(subnets []*net.IPNet, nodeAn
 
 	var gw *gateway
 	switch config.Gateway.Mode {
-	case config.GatewayModeLocal:
-		klog.Info("Preparing Local Gateway")
-		gw, err = newLocalGateway(nc.name, subnets, gatewayNextHops, gatewayIntf, egressGWInterface, ifAddrs, nodeAnnotator,
-			managementPortConfig, nc.Kube, nc.watchFactory, nc.routeManager)
-	case config.GatewayModeShared:
-		klog.Info("Preparing Shared Gateway")
-		gw, err = newSharedGateway(nc.name, subnets, gatewayNextHops, gatewayIntf, egressGWInterface, ifAddrs, nodeAnnotator, nc.Kube,
-			managementPortConfig, nc.watchFactory, nc.routeManager)
+	case config.GatewayModeLocal, config.GatewayModeShared:
+		klog.Info("Preparing Gateway")
+		gw, err = newGateway(nc.name, subnets, gatewayNextHops, gatewayIntf, egressGWInterface, ifAddrs, nodeAnnotator,
+			managementPortConfig, nc.Kube, nc.watchFactory, nc.routeManager, nc.linkManager, nc.nadController, config.Gateway.Mode)
 	case config.GatewayModeDisabled:
 		var chassisID string
 		klog.Info("Gateway Mode is disabled")
@@ -376,7 +371,7 @@ func (nc *DefaultNodeNetworkController) initGateway(subnets []*net.IPNet, nodeAn
 		}
 		chassisID, err = util.GetNodeChassisID()
 		if err != nil {
-			return err
+			return nil, err
 		}
 		err = util.SetL3GatewayConfig(nodeAnnotator, &util.L3GatewayConfig{
 			Mode:      config.GatewayModeDisabled,
@@ -384,12 +379,56 @@ func (nc *DefaultNodeNetworkController) initGateway(subnets []*net.IPNet, nodeAn
 		})
 	}
 	if err != nil {
-		return err
+		return nil, err
+	}
+
+	initGwFunc := func() error {
+		return gw.initFunc()
+	}
+
+	readyGwFunc := func() (bool, error) {
+		controllerReady, err := isOVNControllerReady()
+		if err != nil || !controllerReady {
+			return false, err
+		}
+		return gw.readyFunc()
+	}
+
+	if err := nodeAnnotator.Run(); err != nil {
+		return nil, fmt.Errorf("failed to set node %s annotations: %w", nc.name, err)
+	}
+
+	waiter.AddWait(readyGwFunc, initGwFunc)
+	nc.Gateway = gw
+
+	// Wait for management port and gateway resources to be created by the master
+	start := time.Now()
+	if err := waiter.Wait(); err != nil {
+		return nil, err
+	}
+	klog.Infof("Gateway and management port readiness took %v", time.Since(start))
+
+	return gw, nil
+}
+
+// initGatewayMainStart finishes the gateway initialization for the node: it initializes the
+// LB health checker and the port claim watcher, and it starts watching for events on services and
+// endpoint slices, so that the LB health checker, port claim watcher, node port watcher and node
+// port watcher iptables can react to those events.
+func (nc *DefaultNodeNetworkController) initGatewayMainStart(gw *gateway, waiter *startupWaiter) error {
+	klog.Info("Initializing Gateway Functionality for Gateway Start")
+
+	var loadBalancerHealthChecker *loadBalancerHealthChecker
+	var portClaimWatcher *portClaimWatcher
+
+	var err error
+	if config.Gateway.NodeportEnable && config.OvnKubeNode.Mode == types.NodeModeFull {
+		loadBalancerHealthChecker = newLoadBalancerHealthChecker(nc.name, nc.watchFactory)
+		portClaimWatcher, err = newPortClaimWatcher(nc.recorder)
+		if err != nil {
+			return err
+		}
 	}
-	// a golang interface has two values . an interface is nil if both type and
-	// value is nil. so, you cannot directly set the value to an interface and later check if
-	// value was nil by comparing the interface to nil. this is because if the value is `nil`,
-	// then the interface will still hold the type of the value being set.
 	if loadBalancerHealthChecker != nil {
 		gw.loadBalancerHealthChecker = loadBalancerHealthChecker
@@ -403,14 +442,8 @@ func (nc *DefaultNodeNetworkController) initGateway(subnets []*net.IPNet, nodeAn
 	}
 
 	readyGwFunc := func() (bool, error) {
-		controllerReady, err := isOVNControllerReady()
-		if err != nil || !controllerReady {
-			return false, err
-		}
-
-		return gw.readyFunc()
+		return true, nil
 	}
-
 	waiter.AddWait(readyGwFunc, initGwFunc)
 	nc.Gateway = gw
@@ -450,7 +483,7 @@ func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP) er
 	}
 	config.Gateway.Interface = gwIntf
 
-	gatewayNextHops, gatewayIntf, err := getGatewayNextHops()
+	_, gatewayIntf, err := getGatewayNextHops()
 	if err != nil {
 		return err
 	}
@@ -480,7 +513,7 @@ func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP) er
 		return fmt.Errorf("failed to update masquerade subnet annotation on node: %s, error: %v", nc.name, err)
 	}
 
-	err = configureSvcRouteViaInterface(nc.routeManager, gatewayIntf, gatewayNextHops)
+	err = configureSvcRouteViaInterface(nc.routeManager, gatewayIntf, DummyNextHopIPs())
 	if err != nil {
 		return err
 	}
@@ -497,7 +530,7 @@ func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP) er
 	if err := initSharedGatewayIPTables(); err != nil {
 		return err
 	}
-	gw.nodePortWatcherIptables = newNodePortWatcherIptables()
+	gw.nodePortWatcherIptables = newNodePortWatcherIptables(nc.nadController)
 	gw.loadBalancerHealthChecker = newLoadBalancerHealthChecker(nc.name, nc.watchFactory)
 	portClaimWatcher, err := newPortClaimWatcher(nc.recorder)
 	if err != nil {
@@ -538,13 +571,23 @@ func CleanupClusterNode(name string) error {
 		klog.Errorf("Failed to delete ovn-bridge-mappings, stdout: %q, stderr: %q, error: %v", stdout, stderr, err)
 	}
 
-	// Delete iptable rules for management port
-	DelMgtPortIptRules()
+	// Clean up legacy IPTables rules for management port
+	DelLegacyMgtPortIptRules()
+
+	// Delete nftables rules
+	nodenft.CleanupNFTables()
 
 	return nil
 }
 
 func (nc *DefaultNodeNetworkController) updateGatewayMAC(link netlink.Link) error {
+	// TBD-merge for dpu-host mode: if the MAC of the dpu-host interface that connects to the
+	// gateway bridge on the dpu changes, we need to update the dpu's gatewayBridge.macAddress
+	// L3 gateway annotation (see bridgeForInterface)
+	if config.OvnKubeNode.Mode != types.NodeModeFull {
+		return nil
+	}
+
 	if nc.Gateway.GetGatewayBridgeIface() != link.Attrs().Name {
 		return nil
 	}
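
Note on the change above: gateway setup is now split into a pre-start phase (initGatewayPreStart builds the gateway, publishes node annotations, and waits for the master to create the management port and gateway resources) and a main-start phase (initGatewayMainStart wires up the service watchers). A minimal sketch of the assumed call order — the caller shape and error handling here are illustrative, not part of this patch:

	// sketch: two-phase gateway bring-up on the node side (assumed call order)
	gw, err := nc.initGatewayPreStart(hostSubnets, nodeAnnotator, mgmtPortConfig, nodeIP)
	if err != nil {
		return err
	}
	// ... management port and other node initialization runs in between ...
	waiter := newStartupWaiter()
	if err := nc.initGatewayMainStart(gw, waiter); err != nil {
		return err
	}
	if err := waiter.Wait(); err != nil { // main-start readiness is trivially true
		return err
	}
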
diff --git a/go-controller/pkg/node/gateway_init_linux_test.go b/go-controller/pkg/node/gateway_init_linux_test.go
index 6a01d28fbc..8dc76d86aa 100644
--- a/go-controller/pkg/node/gateway_init_linux_test.go
+++ b/go-controller/pkg/node/gateway_init_linux_test.go
@@ -25,13 +25,19 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes/fake"
 
+	nadfake "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/fake"
+
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
 	adminpolicybasedrouteclient "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/fake"
+	udnfakeclient "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/fake"
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory"
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube"
+	networkAttachDefController "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller"
+	nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables"
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager"
 	ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing"
 	linkMock "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/mocks/github.com/vishvananda/netlink"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/nad"
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util"
 	utilMock "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/mocks"
@@ -40,10 +46,46 @@ import (
 	"github.com/containernetworking/plugins/pkg/testutils"
 	"github.com/vishvananda/netlink"
 
-	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 )
 
+// The base expected nftables rules. You must substitute in the management port interface name.
+const baseNFTRulesFmt = `
+add table inet ovn-kubernetes
+add chain inet ovn-kubernetes mgmtport-snat { type nat hook postrouting priority 100 ; comment "OVN SNAT to Management Port" ; }
+add rule inet ovn-kubernetes mgmtport-snat oifname != %q return
+add rule inet ovn-kubernetes mgmtport-snat meta l4proto . th dport @mgmtport-no-snat-nodeports counter return
+add rule inet ovn-kubernetes mgmtport-snat ip daddr . meta l4proto . th dport @mgmtport-no-snat-services-v4 counter return
+add rule inet ovn-kubernetes mgmtport-snat counter snat ip to 10.1.1.0
+add set inet ovn-kubernetes mgmtport-no-snat-nodeports { type inet_proto . inet_service ; comment "NodePorts not subject to management port SNAT" ; }
+add set inet ovn-kubernetes mgmtport-no-snat-services-v4 { type ipv4_addr . inet_proto . inet_service ; comment "eTP:Local short-circuit not subject to management port SNAT (IPv4)" ; }
+add set inet ovn-kubernetes mgmtport-no-snat-services-v6 { type ipv6_addr . inet_proto . inet_service ; comment "eTP:Local short-circuit not subject to management port SNAT (IPv6)" ; }
+`
+
+// The base expected nftables rules with UDN enabled. You must substitute in the management port interface name.
+const baseUDNNFTRulesFmt = `
+add map inet ovn-kubernetes udn-mark-nodeports { type inet_proto . inet_service : verdict ; comment "UDN services NodePorts mark" ; }
+add map inet ovn-kubernetes udn-mark-external-ips-v4 { type ipv4_addr . inet_proto . inet_service : verdict ; comment "UDN services External IPs mark (IPv4)" ; }
+add map inet ovn-kubernetes udn-mark-external-ips-v6 { type ipv6_addr . inet_proto . inet_service : verdict ; comment "UDN services External IPs mark (IPv6)" ; }
+add chain inet ovn-kubernetes udn-service-mark { comment "UDN services packet mark" ; }
+add rule inet ovn-kubernetes udn-service-mark fib daddr type local meta l4proto . th dport vmap @udn-mark-nodeports
+add rule inet ovn-kubernetes udn-service-mark ip daddr . meta l4proto . th dport vmap @udn-mark-external-ips-v4
+add rule inet ovn-kubernetes udn-service-mark ip6 daddr . meta l4proto . th dport vmap @udn-mark-external-ips-v6
+add chain inet ovn-kubernetes udn-service-prerouting { type filter hook prerouting priority -150 ; comment "UDN services packet mark - Prerouting" ; }
+add rule inet ovn-kubernetes udn-service-prerouting iifname != %q jump udn-service-mark
+add chain inet ovn-kubernetes udn-service-output { type filter hook output priority -150 ; comment "UDN services packet mark - Output" ; }
+add rule inet ovn-kubernetes udn-service-output jump udn-service-mark
+`
+
+func getBaseNFTRules(mgmtPort string) string {
+	ret := fmt.Sprintf(baseNFTRulesFmt, mgmtPort)
+	if util.IsNetworkSegmentationSupportEnabled() {
+		ret += fmt.Sprintf(baseUDNNFTRulesFmt, mgmtPort)
+	}
+	return ret
+}
+
 func shareGatewayInterfaceTest(app *cli.App, testNS ns.NetNS,
 	eth0Name, eth0MAC, eth0GWIP, eth0CIDR string, gatewayVLANID uint, l netlink.Link, hwOffload, setNodeIP bool) {
 	const mtu string = "1234"
@@ -166,10 +208,6 @@ func shareGatewayInterfaceTest(app *cli.App, testNS ns.NetNS,
 			"ovs-ofctl -O OpenFlow13 --bundle replace-flows breth0 -",
 		})
 		// nodePortWatcher()
-		fexec.AddFakeCmd(&ovntest.ExpectedCmd{
-			Cmd:    "ovs-vsctl --timeout=15 --if-exists get interface patch-breth0_" + nodeName + "-to-br-int ofport",
-			Output: "5",
-		})
 		fexec.AddFakeCmd(&ovntest.ExpectedCmd{
 			Cmd:    "ovs-vsctl --timeout=15 --if-exists get interface eth0 ofport",
 			Output: "7",
@@ -201,9 +239,11 @@ func shareGatewayInterfaceTest(app *cli.App, testNS ns.NetNS,
 	_, nodeNet, err := net.ParseCIDR(nodeSubnet)
 	Expect(err).NotTo(HaveOccurred())
 
+	iptV4, iptV6 := util.SetFakeIPTablesHelpers()
+	nft := nodenft.SetFakeNFTablesHelper()
+
 	// Make a fake MgmtPortConfig with only the fields we care about
 	fakeMgmtPortIPFamilyConfig := managementPortIPFamilyConfig{
-		ipt:        nil,
 		allSubnets: nil,
 		ifAddr:     nodeNet,
 		gwIP:       nodeNet.IP,
@@ -213,15 +253,19 @@ func shareGatewayInterfaceTest(app *cli.App, testNS ns.NetNS,
 		ifName:    nodeName,
 		link:      nil,
 		routerMAC: nil,
+		nft:       nft,
 		ipv4:      &fakeMgmtPortIPFamilyConfig,
 		ipv6:      nil,
 	}
+	err = setupManagementPortNFTables(&fakeMgmtPortConfig)
+	Expect(err).NotTo(HaveOccurred())
 
 	kubeFakeClient := fake.NewSimpleClientset(&v1.NodeList{
 		Items: []v1.Node{existingNode},
 	})
 	fakeClient := &util.OVNNodeClientset{
-		KubeClient: kubeFakeClient,
+		KubeClient:            kubeFakeClient,
+		NetworkAttchDefClient: nadfake.NewSimpleClientset(),
 	}
 
 	stop := make(chan struct{})
@@ -238,8 +282,6 @@ func shareGatewayInterfaceTest(app *cli.App, testNS ns.NetNS,
 
 	k := &kube.Kube{KClient: kubeFakeClient}
 
-	iptV4, iptV6 := util.SetFakeIPTablesHelpers()
-
 	nodeAnnotator := kube.NewNodeAnnotator(k, existingNode.Name)
 
 	err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, ovntest.MustParseIPNets(nodeSubnet))
@@ -247,6 +289,16 @@ func shareGatewayInterfaceTest(app *cli.App, testNS ns.NetNS,
 	err = nodeAnnotator.Run()
 	Expect(err).NotTo(HaveOccurred())
 	rm := routemanager.NewController()
+	var nadController *networkAttachDefController.NetAttachDefinitionController
+	if util.IsNetworkSegmentationSupportEnabled() {
+		testNCM := &nad.FakeNetworkControllerManager{}
+		nadController, err = networkAttachDefController.NewNetAttachDefinitionController("test", testNCM, wf, nil)
+		Expect(err).NotTo(HaveOccurred())
+		err = nadController.Start()
+		Expect(err).NotTo(HaveOccurred())
+		defer nadController.Stop()
+	}
+	Expect(err).NotTo(HaveOccurred())
 	wg.Add(1)
 	go testNS.Do(func(netNS ns.NetNS) error {
 		defer GinkgoRecover()
@@ -293,8 +345,10 @@ func shareGatewayInterfaceTest(app *cli.App, testNS ns.NetNS,
 		gatewayNextHops, gatewayIntf, err := getGatewayNextHops()
 		Expect(err).NotTo(HaveOccurred())
 		ifAddrs := ovntest.MustParseIPNets(eth0CIDR)
-		sharedGw, err := newSharedGateway(nodeName, ovntest.MustParseIPNets(nodeSubnet), gatewayNextHops, gatewayIntf, "", ifAddrs, nodeAnnotator, k,
-			&fakeMgmtPortConfig, wf, rm)
+		sharedGw, err := newGateway(nodeName, ovntest.MustParseIPNets(nodeSubnet), gatewayNextHops, gatewayIntf, "", ifAddrs, nodeAnnotator,
+			&fakeMgmtPortConfig, k, wf, rm, nil, nadController, config.GatewayModeShared)
+		Expect(err).NotTo(HaveOccurred())
+		err = sharedGw.initFunc()
 		Expect(err).NotTo(HaveOccurred())
 		err = sharedGw.Init(stop, wg)
 		Expect(err).NotTo(HaveOccurred())
@@ -399,12 +453,11 @@ func shareGatewayInterfaceTest(app *cli.App, testNS ns.NetNS,
 			"POSTROUTING": []string{
 				"-j OVN-KUBE-EGRESS-SVC",
 			},
-			"OVN-KUBE-NODEPORT":      []string{},
-			"OVN-KUBE-EXTERNALIP":    []string{},
-			"OVN-KUBE-SNAT-MGMTPORT": []string{},
-			"OVN-KUBE-ETP":           []string{},
-			"OVN-KUBE-ITP":           []string{},
-			"OVN-KUBE-EGRESS-SVC":    []string{},
+			"OVN-KUBE-NODEPORT":   []string{},
+			"OVN-KUBE-EXTERNALIP": []string{},
+			"OVN-KUBE-ETP":        []string{},
+			"OVN-KUBE-ITP":        []string{},
+			"OVN-KUBE-EGRESS-SVC": []string{},
 		},
 		"filter": {},
 		"mangle": {
@@ -435,6 +488,10 @@ func shareGatewayInterfaceTest(app *cli.App, testNS ns.NetNS,
 	err = f6.MatchState(expectedTables, nil)
 	Expect(err).NotTo(HaveOccurred())
 
+	expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName)
+	err = nodenft.MatchNFTRules(expectedNFT, nft.Dump())
+	Expect(err).NotTo(HaveOccurred())
+
 	// check that masquerade subnet annotation got updated
 	node, err := wf.GetNode(nodeName)
 	Expect(err).NotTo(HaveOccurred())
@@ -597,24 +654,11 @@ func shareGatewayInterfaceDPUTest(app *cli.App, testNS ns.NetNS,
 		Cmd:    "ovs-vsctl --timeout=15 get interface " + hostRep + " ofport",
 		Output: "9",
 	})
-	fexec.AddFakeCmdsNoOutputNoError([]string{
-		"ovs-vsctl --timeout=15 get Open_vSwitch . external_ids:ovn-encap-ip",
-	})
-	fexec.AddFakeCmdsNoOutputNoError([]string{
-		"ovs-vsctl --timeout=15 set Open_vSwitch . external_ids:ovn-encap-ip=192.168.1.101",
-	})
-	fexec.AddFakeCmdsNoOutputNoError([]string{
-		"ovn-appctl --timeout=5 -t ovn-controller exit --restart",
-	})
 	// cleanup flows
 	fexec.AddFakeCmdsNoOutputNoError([]string{
 		"ovs-ofctl -O OpenFlow13 --bundle replace-flows " + brphys + " -",
 	})
 	// nodePortWatcher()
-	fexec.AddFakeCmd(&ovntest.ExpectedCmd{
-		Cmd:    "ovs-vsctl --timeout=15 --if-exists get interface patch-" + brphys + "_" + nodeName + "-to-br-int ofport",
-		Output: "5",
-	})
 	fexec.AddFakeCmd(&ovntest.ExpectedCmd{
 		Cmd:    "ovs-vsctl --timeout=15 --if-exists get interface " + uplinkPort + " ofport",
 		Output: "7",
@@ -637,7 +681,8 @@ func shareGatewayInterfaceDPUTest(app *cli.App, testNS ns.NetNS,
 		Items: []v1.Node{existingNode},
 	})
 	fakeClient := &util.OVNNodeClientset{
-		KubeClient: kubeFakeClient,
+		KubeClient:            kubeFakeClient,
+		NetworkAttchDefClient: nadfake.NewSimpleClientset(),
 	}
 
 	_, nodeNet, err := net.ParseCIDR(nodeSubnet)
@@ -645,19 +690,22 @@ func shareGatewayInterfaceDPUTest(app *cli.App, testNS ns.NetNS,
 
 	// Make a fake MgmtPortConfig with only the fields we care about
 	fakeMgmtPortIPFamilyConfig := managementPortIPFamilyConfig{
-		ipt:        nil,
 		allSubnets: nil,
 		ifAddr:     nodeNet,
 		gwIP:       nodeNet.IP,
 	}
+	nft := nodenft.SetFakeNFTablesHelper()
 	fakeMgmtPortConfig := managementPortConfig{
 		ifName:    nodeName,
 		link:      nil,
 		routerMAC: nil,
+		nft:       nft,
 		ipv4:      &fakeMgmtPortIPFamilyConfig,
 		ipv6:      nil,
 	}
+	err = setupManagementPortNFTables(&fakeMgmtPortConfig)
+	Expect(err).NotTo(HaveOccurred())
+
 	stop := make(chan struct{})
 	wf, err := factory.NewNodeWatchFactory(fakeClient, nodeName)
@@ -685,6 +733,15 @@ func shareGatewayInterfaceDPUTest(app *cli.App, testNS ns.NetNS,
 	runtime.LockOSThread()
 	defer runtime.UnlockOSThread()
 	rm := routemanager.NewController()
+	var nadController *networkAttachDefController.NetAttachDefinitionController
+	if util.IsNetworkSegmentationSupportEnabled() {
+		testNCM := &nad.FakeNetworkControllerManager{}
+		nadController, err = networkAttachDefController.NewNetAttachDefinitionController("test", testNCM, wf, nil)
+		Expect(err).NotTo(HaveOccurred())
+		err = nadController.Start()
+		Expect(err).NotTo(HaveOccurred())
+		defer nadController.Stop()
+	}
 	wg.Add(1)
 	go testNS.Do(func(netNS ns.NetNS) error {
 		defer GinkgoRecover()
@@ -692,7 +749,7 @@ func shareGatewayInterfaceDPUTest(app *cli.App, testNS ns.NetNS,
 		wg.Done()
 		return nil
 	})
-	// FIXME(mk): starting the gateaway causing go routines to be spawned within sub functions and therefore they escape the
+	// FIXME(mk): starting the gateway causes go routines to be spawned within sub functions and therefore they escape the
 	// netns we wanted to set it to originally here. Refactor test cases to not spawn a go routine or just fake out everything
 	// and remove need to create netns
 	err = testNS.Do(func(ns.NetNS) error {
@@ -700,8 +757,10 @@ func shareGatewayInterfaceDPUTest(app *cli.App, testNS ns.NetNS,
 		gatewayNextHops, gatewayIntf, err := getGatewayNextHops()
 		Expect(err).NotTo(HaveOccurred())
 
-		sharedGw, err := newSharedGateway(nodeName, ovntest.MustParseIPNets(nodeSubnet), gatewayNextHops,
-			gatewayIntf, "", ifAddrs, nodeAnnotator, k, &fakeMgmtPortConfig, wf, rm)
+		sharedGw, err := newGateway(nodeName, ovntest.MustParseIPNets(nodeSubnet), gatewayNextHops,
+			gatewayIntf, "", ifAddrs, nodeAnnotator, &fakeMgmtPortConfig, k, wf, rm, nil, nadController, config.GatewayModeShared)
+		Expect(err).NotTo(HaveOccurred())
+		err = sharedGw.initFunc()
 		Expect(err).NotTo(HaveOccurred())
 		err = sharedGw.Init(stop, wg)
 		Expect(err).NotTo(HaveOccurred())
@@ -788,6 +847,7 @@ func shareGatewayInterfaceDPUHostTest(app *cli.App, testNS ns.NetNS, uplinkName,
 	fakeClient := &util.OVNNodeClientset{
 		KubeClient:             kubeFakeClient,
 		AdminPolicyRouteClient: adminpolicybasedrouteclient.NewSimpleClientset(),
+		NetworkAttchDefClient:  nadfake.NewSimpleClientset(),
 	}
 
 	stop := make(chan struct{})
@@ -806,7 +866,7 @@ func shareGatewayInterfaceDPUHostTest(app *cli.App, testNS ns.NetNS, uplinkName,
 	ipnet.IP = ip
 	routeManager := routemanager.NewController()
 	cnnci := NewCommonNodeNetworkControllerInfo(kubeFakeClient, fakeClient.AdminPolicyRouteClient, wf, nil, nodeName, routeManager)
-	nc := newDefaultNodeNetworkController(cnnci, stop, wg, routeManager)
+	nc := newDefaultNodeNetworkController(cnnci, stop, wg, routeManager, nil)
 	// must run route manager manually which is usually started with nc.Start()
 	wg.Add(1)
 	go testNS.Do(func(netNS ns.NetNS) error {
@@ -829,7 +889,7 @@ func shareGatewayInterfaceDPUHostTest(app *cli.App, testNS ns.NetNS, uplinkName,
 	expRoute := &netlink.Route{
 		Dst:       ovntest.MustParseIPNet(svcCIDR),
 		LinkIndex: link.Attrs().Index,
-		Gw:        ovntest.MustParseIP(gwIP),
+		Gw:        ovntest.MustParseIP(config.Gateway.MasqueradeIPs.V4DummyNextHopMasqueradeIP.String()),
 	}
 	Eventually(func() error {
 		r, err := util.LinkRouteGetFilteredRoute(
@@ -1027,10 +1087,6 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0`
 		Cmd:    "ovs-ofctl show breth0",
 		Output: ovsOFOutput,
 	})
-	fexec.AddFakeCmd(&ovntest.ExpectedCmd{
-		Cmd:    "ovs-vsctl --timeout=15 --if-exists get interface patch-breth0_" + nodeName + "-to-br-int ofport",
-		Output: "5",
-	})
 	fexec.AddFakeCmd(&ovntest.ExpectedCmd{
 		Cmd:    "ovs-vsctl --timeout=15 --if-exists get interface eth0 ofport",
 		Output: "7",
@@ -1083,19 +1139,26 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0`
 
 	// Make a fake MgmtPortConfig with only the fields we care about
 	fakeMgmtPortIPFamilyConfig := managementPortIPFamilyConfig{
-		ipt:        nil,
 		allSubnets: nil,
 		ifAddr:     nodeNet,
 		gwIP:       nodeNet.IP,
 	}
+	nft := nodenft.SetFakeNFTablesHelper()
 	fakeMgmtPortConfig := managementPortConfig{
 		ifName:    types.K8sMgmtIntfName,
 		link:      nil,
 		routerMAC: nil,
+		nft:       nft,
 		ipv4:      &fakeMgmtPortIPFamilyConfig,
 		ipv6:      nil,
 	}
+	err = setupManagementPortNFTables(&fakeMgmtPortConfig)
+	Expect(err).NotTo(HaveOccurred())
+	if util.IsNetworkSegmentationSupportEnabled() {
+		err = configureUDNServicesNFTables()
+		Expect(err).NotTo(HaveOccurred())
+	}
 
 	kubeFakeClient := fake.NewSimpleClientset(
 		&v1.NodeList{
@@ -1105,7 +1168,9 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0`
 		&endpointSlice,
 	)
 	fakeClient := &util.OVNNodeClientset{
-		KubeClient: kubeFakeClient,
+		KubeClient:               kubeFakeClient,
+		NetworkAttchDefClient:    nadfake.NewSimpleClientset(),
+		UserDefinedNetworkClient: udnfakeclient.NewSimpleClientset(),
 	}
 
 	stop := make(chan struct{})
@@ -1132,6 +1197,15 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0`
 	ip, ipNet, _ := net.ParseCIDR(eth0CIDR)
 	ipNet.IP = ip
 	rm := routemanager.NewController()
+	var nadController *networkAttachDefController.NetAttachDefinitionController
+	if util.IsNetworkSegmentationSupportEnabled() {
+		testNCM := &nad.FakeNetworkControllerManager{}
+		nadController, err = networkAttachDefController.NewNetAttachDefinitionController("test", testNCM, wf, nil)
+		Expect(err).NotTo(HaveOccurred())
+		err = nadController.Start()
+		Expect(err).NotTo(HaveOccurred())
+		defer nadController.Stop()
+	}
 	go testNS.Do(func(netNS ns.NetNS) error {
 		defer GinkgoRecover()
 		rm.Run(stop, 10*time.Second)
@@ -1144,8 +1218,10 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0`
 		gatewayNextHops, gatewayIntf, err := getGatewayNextHops()
 		Expect(err).NotTo(HaveOccurred())
 		ifAddrs := ovntest.MustParseIPNets(eth0CIDR)
-		localGw, err := newLocalGateway(nodeName, ovntest.MustParseIPNets(nodeSubnet), gatewayNextHops, gatewayIntf, "", ifAddrs,
-			nodeAnnotator, &fakeMgmtPortConfig, k, wf, rm)
+		localGw, err := newGateway(nodeName, ovntest.MustParseIPNets(nodeSubnet), gatewayNextHops, gatewayIntf, "", ifAddrs,
+			nodeAnnotator, &fakeMgmtPortConfig, k, wf, rm, nil, nadController, config.GatewayModeLocal)
+		Expect(err).NotTo(HaveOccurred())
+		err = localGw.initFunc()
 		Expect(err).NotTo(HaveOccurred())
 		err = localGw.Init(stop, wg)
 		Expect(err).NotTo(HaveOccurred())
@@ -1225,10 +1301,9 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0`
 				"-s 169.254.169.1 -j MASQUERADE",
 				"-s 10.1.1.0/24 -j MASQUERADE",
 			},
-			"OVN-KUBE-SNAT-MGMTPORT": []string{},
-			"OVN-KUBE-ETP":           []string{},
-			"OVN-KUBE-ITP":           []string{},
-			"OVN-KUBE-EGRESS-SVC":    []string{},
+			"OVN-KUBE-ETP":        []string{},
+			"OVN-KUBE-ITP":        []string{},
+			"OVN-KUBE-EGRESS-SVC": []string{},
 		},
 		"filter": {
 			"FORWARD": []string{
@@ -1260,6 +1335,16 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0`
 	expectedTables["filter"]["FORWARD"] = append(expectedMCSRules, expectedTables["filter"]["FORWARD"]...)
 	expectedTables["filter"]["OUTPUT"] = append(expectedMCSRules, expectedTables["filter"]["OUTPUT"]...)
 	// END OCP HACK
+	if util.IsNetworkSegmentationSupportEnabled() {
+		expectedTables["nat"]["POSTROUTING"] = append(expectedTables["nat"]["POSTROUTING"],
+			"-j OVN-KUBE-UDN-MASQUERADE",
+		)
+		expectedTables["nat"]["OVN-KUBE-UDN-MASQUERADE"] = append(expectedTables["nat"]["OVN-KUBE-UDN-MASQUERADE"],
+			"-s 169.254.169.2/29 -j RETURN",     // this guarantees we don't SNAT default network masqueradeIPs
+			"-d 172.16.1.0/24 -j RETURN",        // this guarantees we don't SNAT service traffic
+			"-s 169.254.169.0/29 -j MASQUERADE", // this guarantees we SNAT all UDN MasqueradeIPs traffic leaving the node
+		)
+	}
 	f4 := iptV4.(*util.FakeIPTables)
 	err = f4.MatchState(expectedTables, map[util.FakePolicyKey]string{{
 		Table: "filter",
@@ -1275,6 +1360,11 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0`
 	f6 := iptV6.(*util.FakeIPTables)
 	err = f6.MatchState(expectedTables, nil)
 	Expect(err).NotTo(HaveOccurred())
+
+	expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName)
+	err = nodenft.MatchNFTRules(expectedNFT, nft.Dump())
+	Expect(err).NotTo(HaveOccurred())
+
 	return nil
 }
 
@@ -1356,6 +1446,12 @@ var _ = Describe("Gateway Init Operations", func() {
 			localGatewayInterfaceTest(app, testNS, eth0Name, eth0MAC, eth0GWIP, eth0CIDR, link)
 		})
 
+		ovntest.OnSupportedPlatformsIt("sets up a local gateway with predetermined interface when network-segmentation is enabled", func() {
+			config.OVNKubernetesFeature.EnableNetworkSegmentation = true
+			config.OVNKubernetesFeature.EnableMultiNetwork = true
+			localGatewayInterfaceTest(app, testNS, eth0Name, eth0MAC, eth0GWIP, eth0CIDR, link)
+		})
+
 		ovntest.OnSupportedPlatformsIt("sets up a local gateway with predetermined interface and no default route", func() {
 			localGatewayInterfaceTest(app, testNS, eth0Name, eth0MAC, "", eth0CIDR, link)
 		})
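
The test changes above all follow the same pattern for the new nftables assertions: install a fake knftables helper, program the management-port rules, and compare an expected dump against the fake's state. A condensed sketch of that pattern, using only names that appear in the test code above:

	nft := nodenft.SetFakeNFTablesHelper()
	Expect(setupManagementPortNFTables(&fakeMgmtPortConfig)).To(Succeed())

	expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName)
	err := nodenft.MatchNFTRules(expectedNFT, nft.Dump())
	Expect(err).NotTo(HaveOccurred())
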
diff --git a/go-controller/pkg/node/gateway_iptables.go b/go-controller/pkg/node/gateway_iptables.go
index c172b770a3..1fc96ca8a6 100644
--- a/go-controller/pkg/node/gateway_iptables.go
+++ b/go-controller/pkg/node/gateway_iptables.go
@@ -12,6 +12,7 @@ import (
 	utilnet "k8s.io/utils/net"
 
 	"github.com/coreos/go-iptables/iptables"
+
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/controllers/egressservice"
 	nodeipt "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/iptables"
@@ -20,10 +21,11 @@ import (
 )
 
 const (
-	iptableNodePortChain   = "OVN-KUBE-NODEPORT"   // called from nat-PREROUTING and nat-OUTPUT
-	iptableExternalIPChain = "OVN-KUBE-EXTERNALIP" // called from nat-PREROUTING and nat-OUTPUT
-	iptableETPChain        = "OVN-KUBE-ETP"        // called from nat-PREROUTING only
-	iptableITPChain        = "OVN-KUBE-ITP"        // called from mangle-OUTPUT and nat-OUTPUT
+	iptableNodePortChain      = "OVN-KUBE-NODEPORT"       // called from nat-PREROUTING and nat-OUTPUT
+	iptableExternalIPChain    = "OVN-KUBE-EXTERNALIP"     // called from nat-PREROUTING and nat-OUTPUT
+	iptableETPChain           = "OVN-KUBE-ETP"            // called from nat-PREROUTING only
+	iptableITPChain           = "OVN-KUBE-ITP"            // called from mangle-OUTPUT and nat-OUTPUT
+	iptableUDNMasqueradeChain = "OVN-KUBE-UDN-MASQUERADE" // called from nat-POSTROUTING
 )
 
 func clusterIPTablesProtocols() []iptables.Protocol {
@@ -78,6 +80,18 @@ func deleteIptRules(rules []nodeipt.Rule) error {
 	return nodeipt.DelRules(rules)
 }
 
+// ensureChain ensures that a chain exists within a table
+func ensureChain(table, chain string) error {
+	for _, proto := range clusterIPTablesProtocols() {
+		ipt, err := util.GetIPTablesHelper(proto)
+		if err != nil {
+			return fmt.Errorf("failed to get IPTables helper to add UDN chain: %v", err)
+		}
+		addChaintoTable(ipt, table, chain)
+	}
+	return nil
+}
+
 func getGatewayInitRules(chain string, proto iptables.Protocol) []nodeipt.Rule {
 	iptRules := []nodeipt.Rule{}
 	if chain == egressservice.Chain {
@@ -194,51 +208,10 @@ func getITPLocalIPTRules(svcPort kapi.ServicePort, clusterIP string, svcHasLocal
 	}
 }
 
-// getNodePortETPLocalIPTRule returns the IPTable REDIRECT or RETURN rules for a service of type nodePort if ETP=local
-// `svcPort` corresponds to port details for this service as specified in the service object
-// `targetIP` corresponds to svc.spec.ClusterIP
-// This function returns a RETURN rule in iptableMgmPortChain to prevent SNAT of sourceIP
-func getNodePortETPLocalIPTRule(svcPort kapi.ServicePort, targetIP string) nodeipt.Rule {
-	return getSkipMgmtSNATRule(string(svcPort.Protocol), fmt.Sprintf("%d", svcPort.NodePort), "", getIPTablesProtocol(targetIP))
-}
-
-// getSkipMgmtSNATRule generates the return iptables rule for avoiding SNAT to mgmt port
-func getSkipMgmtSNATRule(protocol, port, destIP string, ipFamily iptables.Protocol) nodeipt.Rule {
-	args := make([]string, 0, 8)
-	args = append(args, "-p", protocol)
-	if len(destIP) > 0 {
-		args = append(args, "-d", destIP)
-	}
-	args = append(args, "--dport", port, "-j", "RETURN")
-	n := nodeipt.Rule{
-		Table:    "nat",
-		Chain:    iptableMgmPortChain,
-		Args:     args,
-		Protocol: ipFamily,
-	}
-	return n
-}
-
 func computeProbability(n, i int) string {
 	return fmt.Sprintf("%0.10f", 1.0/float64(n-i+1))
 }
 
-func generateSkipMgmtForLocalEndpoints(svcPort kapi.ServicePort, externalIP string, localEndpoints []string) []nodeipt.Rule {
-	iptRules := make([]nodeipt.Rule, 0, len(localEndpoints))
-	for _, localEndpoint := range localEndpoints {
-		if len(localEndpoint) == 0 {
-			continue
-		}
-		iptRules = append([]nodeipt.Rule{getSkipMgmtSNATRule(
-			string(svcPort.Protocol),
-			fmt.Sprintf("%v", int32(svcPort.TargetPort.IntValue())),
-			localEndpoint,
-			getIPTablesProtocol(externalIP),
-		)}, iptRules...)
-	}
-	return iptRules
-}
-
 func generateIPTRulesForLoadBalancersWithoutNodePorts(svcPort kapi.ServicePort, externalIP string, localEndpoints []string) []nodeipt.Rule {
 	iptRules := make([]nodeipt.Rule, 0, len(localEndpoints))
 	if len(localEndpoints) == 0 {
@@ -440,14 +413,14 @@ func getLocalGatewayFilterRules(ifname string, cidr *net.IPNet) []nodeipt.Rule {
 	}
 }
 
-func getLocalGatewayNATRules(ifname string, cidr *net.IPNet) []nodeipt.Rule {
+func getLocalGatewayNATRules(cidr *net.IPNet) []nodeipt.Rule {
 	// Allow packets to/from the gateway interface in case defaults deny
 	protocol := getIPTablesProtocol(cidr.IP.String())
 	masqueradeIP := config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP
 	if protocol == iptables.ProtocolIPv6 {
 		masqueradeIP = config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP
 	}
-	return []nodeipt.Rule{
+	rules := []nodeipt.Rule{
 		{
 			Table: "nat",
 			Chain: "POSTROUTING",
@@ -467,6 +440,78 @@ func getLocalGatewayNATRules(ifname string, cidr *net.IPNet) []nodeipt.Rule {
 			Protocol: protocol,
 		},
 	}
+	// FIXME(tssurya): If the feature is disabled we should be removing
+	// these rules
+	if util.IsNetworkSegmentationSupportEnabled() {
+		rules = append(rules, getUDNMasqueradeRules(protocol)...)
+	}
+	return rules
+}
+
+// getUDNMasqueradeRules is only called for local-gateway-mode
+func getUDNMasqueradeRules(protocol iptables.Protocol) []nodeipt.Rule {
+	// the following rules are actively used only for the UDN feature:
+	// -A POSTROUTING -j OVN-KUBE-UDN-MASQUERADE
+	// -A OVN-KUBE-UDN-MASQUERADE -s 169.254.0.0/29 -j RETURN
+	// -A OVN-KUBE-UDN-MASQUERADE -d 10.96.0.0/16 -j RETURN
+	// -A OVN-KUBE-UDN-MASQUERADE -s 169.254.0.0/17 -j MASQUERADE
+	// NOTE: Ordering is important here; the RETURN rules must come before
+	// the MASQUERADE rule. Please don't change the ordering.
+	srcUDNMasqueradePrefix := config.Gateway.V4MasqueradeSubnet
+	// defaultNetworkReservedMasqueradePrefix contains the first 6 IPs in the masquerade
+	// range that shouldn't be MASQUERADED. Hence /29 and /125 are intentionally hardcoded here.
+	defaultNetworkReservedMasqueradePrefix := config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String() + "/29"
+	ipFamily := utilnet.IPv4
+	if protocol == iptables.ProtocolIPv6 {
+		srcUDNMasqueradePrefix = config.Gateway.V6MasqueradeSubnet
+		defaultNetworkReservedMasqueradePrefix = config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String() + "/125"
+		ipFamily = utilnet.IPv6
+	}
+	rules := []nodeipt.Rule{
+		{
+			Table:    "nat",
+			Chain:    "POSTROUTING",
+			Args:     []string{"-j", iptableUDNMasqueradeChain}, // NOTE: AddRules will take care of creating the chain
+			Protocol: protocol,
+		},
+		{
+			Table: "nat",
+			Chain: iptableUDNMasqueradeChain,
+			Args: []string{
+				"-s", defaultNetworkReservedMasqueradePrefix,
+				"-j", "RETURN",
+			},
+			Protocol: protocol,
+		},
+	}
+	for _, svcCIDR := range config.Kubernetes.ServiceCIDRs {
+		if utilnet.IPFamilyOfCIDR(svcCIDR) != ipFamily {
+			continue
+		}
+		rules = append(rules,
+			nodeipt.Rule{
+				Table: "nat",
+				Chain: iptableUDNMasqueradeChain,
+				Args: []string{
+					"-d", svcCIDR.String(),
+					"-j", "RETURN",
+				},
+				Protocol: protocol,
+			},
+		)
+	}
+	rules = append(rules,
+		nodeipt.Rule{
+			Table: "nat",
+			Chain: iptableUDNMasqueradeChain,
+			Args: []string{
+				"-s", srcUDNMasqueradePrefix,
+				"-j", "MASQUERADE",
+			},
+			Protocol: protocol,
+		},
+	)
+	return rules
 }
 
 // initLocalGatewayNATRules sets up iptables rules for interfaces
@@ -480,7 +525,7 @@ func initLocalGatewayNATRules(ifname string, cidr *net.IPNet) error {
 	}
 	// append the masquerade rules in POSTROUTING table since that needs to be
 	// evaluated last.
-	return appendIptRules(getLocalGatewayNATRules(ifname, cidr))
+	return appendIptRules(getLocalGatewayNATRules(cidr))
 }
 
 func addChaintoTable(ipt util.IPTablesHelper, tableName, chain string) {
@@ -551,7 +596,9 @@ func recreateIPTRules(table, chain string, keepIPTRules []nodeipt.Rule) error {
 	return utilerrors.Join(errors...)
 }
 
-// getGatewayIPTRules returns ClusterIP, NodePort, ExternalIP and LoadBalancer iptables rules for service.
+// getGatewayIPTRules returns ClusterIP, NodePort, ExternalIP and LoadBalancer iptables
+// rules for service. This must be used in conjunction with getGatewayNFTRules.
+//
 // case1: If !svcHasLocalHostNetEndPnt and svcTypeIsETPLocal rules that redirect traffic
 // to ovn-k8s-mp0 preserving sourceIP are added.
 //
@@ -584,8 +631,7 @@ func getGatewayIPTRules(service *kapi.Service, localEndpoints []string, svcHasLo
 			if config.Gateway.Mode == config.GatewayModeLocal {
 				rules = append(rules, getNodePortIPTRules(svcPort, clusterIP, svcPort.NodePort, svcHasLocalHostNetEndPnt, svcTypeIsETPLocal)...)
 			}
-			// add a skip SNAT rule to OVN-KUBE-SNAT-MGMTPORT to preserve sourceIP for etp=local traffic.
-			rules = append(rules, getNodePortETPLocalIPTRule(svcPort, clusterIP))
+			// Note: getGatewayNFTRules will add rules to ensure that sourceIP is preserved
 		}
 		// case2 (see function description for details)
 		rules = append(rules, getNodePortIPTRules(svcPort, clusterIP, svcPort.Port, svcHasLocalHostNetEndPnt, false)...)
@@ -594,7 +640,6 @@ func getGatewayIPTRules(service *kapi.Service, localEndpoints []string, svcHasLo
 
 	externalIPs := util.GetExternalAndLBIPs(service)
 
-	snatRulesCreated := false
 	for _, externalIP := range externalIPs {
 		err := util.ValidatePort(svcPort.Protocol, svcPort.Port)
 		if err != nil {
@@ -605,14 +650,9 @@ func getGatewayIPTRules(service *kapi.Service, localEndpoints []string, svcHasLo
 			if svcTypeIsETPLocal && !svcHasLocalHostNetEndPnt {
 				// case1 (see function description for details)
 				// DNAT traffic to masqueradeIP:nodePort instead of clusterIP:Port. We are leveraging the existing rules for NODEPORT
-				// service so no need to add skip SNAT rule to OVN-KUBE-SNAT-MGMTPORT since the corresponding nodePort svc would have one.
+				// service so no need to add a rule to skip SNAT since the corresponding nodePort svc would have one.
 				if !util.ServiceTypeHasNodePort(service) {
 					rules = append(rules, generateIPTRulesForLoadBalancersWithoutNodePorts(svcPort, externalIP, localEndpoints)...)
-					// These rules are per endpoint and should only be created one time per endpoint and port combination
-					if !snatRulesCreated {
-						rules = append(rules, generateSkipMgmtForLocalEndpoints(svcPort, externalIP, localEndpoints)...)
-						snatRulesCreated = true
-					}
 				} else {
 					rules = append(rules, getExternalIPTRules(svcPort, externalIP, "", svcHasLocalHostNetEndPnt, svcTypeIsETPLocal)...)
 				}
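
The per-service SNAT exemptions that used to be iptables RETURN rules in OVN-KUBE-SNAT-MGMTPORT are replaced by elements in the nftables sets declared in the test file above (mgmtport-no-snat-nodeports and mgmtport-no-snat-services-v4/-v6). A hedged sketch of how one nodePort exemption could be written with knftables — the set and table names come from the expected dumps above, while the surrounding plumbing (the nft interface variable, the example port) is illustrative only:

	// sketch: exempt TCP nodePort 31111 from management-port SNAT
	tx := nft.NewTransaction()
	tx.Add(&knftables.Element{
		Set: "mgmtport-no-snat-nodeports",
		Key: []string{"tcp", "31111"}, // proto . port, matching the set's inet_proto . inet_service type
	})
	if err := nft.Run(context.Background(), tx); err != nil {
		return fmt.Errorf("failed to add no-SNAT element: %w", err)
	}
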
diff --git a/go-controller/pkg/node/gateway_localnet.go b/go-controller/pkg/node/gateway_localnet.go
index 8114f64d23..728561eedc 100644
--- a/go-controller/pkg/node/gateway_localnet.go
+++ b/go-controller/pkg/node/gateway_localnet.go
@@ -8,24 +8,19 @@ import (
 	"net"
 	"strings"
 
-	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
-	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory"
-	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube"
-	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager"
-	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util"
-	utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors"
 
 	"k8s.io/klog/v2"
 	utilnet "k8s.io/utils/net"
 )
 
-func newLocalGateway(nodeName string, hostSubnets []*net.IPNet, gwNextHops []net.IP, gwIntf, egressGWIntf string, gwIPs []*net.IPNet,
-	nodeAnnotator kube.Annotator, cfg *managementPortConfig, kube kube.Interface, watchFactory factory.NodeWatchFactory,
-	routeManager *routemanager.Controller) (*gateway, error) {
-	klog.Info("Creating new local gateway")
-	gw := &gateway{}
-
+func initLocalGateway(hostSubnets []*net.IPNet, cfg *managementPortConfig) error {
+	klog.Info("Adding iptables masquerading rules for new local gateway")
+	if util.IsNetworkSegmentationSupportEnabled() {
+		if err := ensureChain("nat", iptableUDNMasqueradeChain); err != nil {
+			return fmt.Errorf("failed to ensure chain %s in NAT table: %w", iptableUDNMasqueradeChain, err)
+		}
+	}
 	for _, hostSubnet := range hostSubnets {
 		// local gateway mode uses mp0 as default path for all ingress traffic into OVN
 		var nextHop *net.IPNet
@@ -38,146 +33,11 @@ func newLocalGateway(nodeName string, hostSubnets []*net.IPNet, gwNextHops []net
 		// add iptables masquerading for mp0 to exit the host for egress
 		cidr := nextHop.IP.Mask(nextHop.Mask)
 		cidrNet := &net.IPNet{IP: cidr, Mask: nextHop.Mask}
-		err := initLocalGatewayNATRules(cfg.ifName, cidrNet)
-		if err != nil {
-			return nil, fmt.Errorf("failed to add local NAT rules for: %s, err: %v", cfg.ifName, err)
-		}
-	}
-
-	gwBridge, exGwBridge, err := gatewayInitInternal(
-		nodeName, gwIntf, egressGWIntf, gwNextHops, gwIPs, nodeAnnotator)
-	if err != nil {
-		return nil, err
-	}
-
-	// OCP HACK -- block MCS ports https://github.com/openshift/ovn-kubernetes/pull/170
-	if err := insertMCSBlockIptRules(); err != nil {
-		return nil, err
-	}
-	// END OCP HACK
-
-	if exGwBridge != nil {
-		gw.readyFunc = func() (bool, error) {
-			gwBridge.Lock()
-			for _, netConfig := range gwBridge.netConfig {
-				ready, err := gatewayReady(netConfig.patchPort)
-				if err != nil || !ready {
-					gwBridge.Unlock()
-					return false, err
-				}
-			}
-			gwBridge.Unlock()
-			exGwBridge.Lock()
-			for _, netConfig := range exGwBridge.netConfig {
-				exGWReady, err := gatewayReady(netConfig.patchPort)
-				if err != nil || !exGWReady {
-					exGwBridge.Unlock()
-					return false, err
-				}
-			}
-			exGwBridge.Unlock()
-			return true, nil
-		}
-	} else {
-		gw.readyFunc = func() (bool, error) {
-			gwBridge.Lock()
-			for _, netConfig := range gwBridge.netConfig {
-				ready, err := gatewayReady(netConfig.patchPort)
-				if err != nil || !ready {
-					gwBridge.Unlock()
-					return false, err
-				}
-			}
-			gwBridge.Unlock()
-			return true, nil
-		}
-	}
-
-	gw.initFunc = func() error {
-		klog.Info("Creating Local Gateway Openflow Manager")
-		err := setBridgeOfPorts(gwBridge)
-		if err != nil {
-			return err
-		}
-		if exGwBridge != nil {
-			err = setBridgeOfPorts(exGwBridge)
-			if err != nil {
-				return err
-			}
-
-		}
-
-		gw.nodeIPManager = newAddressManager(nodeName, kube, cfg, watchFactory, gwBridge)
-
-		// Delete stale masquerade resources if there are any. This is to make sure that there
-		// are no Linux resouces with IP from old masquerade subnet when masquerade subnet
-		// gets changed as part of day2 operation.
-		if err := deleteStaleMasqueradeResources(gwBridge.bridgeName, nodeName, watchFactory); err != nil {
-			return fmt.Errorf("failed to remove stale masquerade resources: %w", err)
-		}
-
-		if err := setNodeMasqueradeIPOnExtBridge(gwBridge.bridgeName); err != nil {
-			return fmt.Errorf("failed to set the node masquerade IP on the ext bridge %s: %v", gwBridge.bridgeName, err)
-		}
-
-		if err := addMasqueradeRoute(routeManager, gwBridge.bridgeName, nodeName, gwIPs, watchFactory); err != nil {
-			return fmt.Errorf("failed to set the node masquerade route to OVN: %v", err)
-		}
-
-		// Masquerade config mostly done on node, update annotation
-		if err := updateMasqueradeAnnotation(nodeName, kube); err != nil {
-			return fmt.Errorf("failed to update masquerade subnet annotation on node: %s, error: %v", nodeName, err)
-		}
-
-		gw.openflowManager, err = newGatewayOpenFlowManager(gwBridge, exGwBridge, hostSubnets, gw.nodeIPManager.ListAddresses())
-		if err != nil {
-			return err
+		if err := initLocalGatewayNATRules(cfg.ifName, cidrNet); err != nil {
+			return fmt.Errorf("failed to add local NAT rules for: %s, err: %v", cfg.ifName, err)
 		}
-
-		// resync flows on IP change
-		gw.nodeIPManager.OnChanged = func() {
-			klog.V(5).Info("Node addresses changed, re-syncing bridge flows")
-			if err := gw.openflowManager.updateBridgeFlowCache(hostSubnets, gw.nodeIPManager.ListAddresses()); err != nil {
-				// very unlikely - somehow node has lost its IP address
-				klog.Errorf("Failed to re-generate gateway flows after address change: %v", err)
-			}
-			// update gateway IPs for service openflows programmed by nodePortWatcher interface
-			npw, _ := gw.nodePortWatcher.(*nodePortWatcher)
-			npw.updateGatewayIPs(gw.nodeIPManager)
-			// Services create OpenFlow flows as well, need to update them all
-			if gw.servicesRetryFramework != nil {
-				if errs := gw.addAllServices(); errs != nil {
-					err := utilerrors.Join(errs...)
-					klog.Errorf("Failed to sync all services after node IP change: %v", err)
-				}
-			}
-			gw.openflowManager.requestFlowSync()
-		}
-
-		if config.Gateway.NodeportEnable {
-			if config.OvnKubeNode.Mode == types.NodeModeFull {
-				// (TODO): Internal Traffic Policy is not supported in DPU mode
-				if err := initSvcViaMgmPortRoutingRules(hostSubnets); err != nil {
-					return err
-				}
-			}
-			gw.nodePortWatcher, err = newNodePortWatcher(gwBridge, gw.openflowManager, gw.nodeIPManager, watchFactory)
-			if err != nil {
-				return err
-			}
-		} else {
-			// no service OpenFlows, request to sync flows now.
-			gw.openflowManager.requestFlowSync()
-		}
-
-		if err := addHostMACBindings(gwBridge.bridgeName); err != nil {
-			return fmt.Errorf("failed to add MAC bindings for service routing")
-		}
-
-		return nil
 	}
-	gw.watchFactory = watchFactory.(*factory.WatchFactory)
-	klog.Info("Local Gateway Creation Complete")
-	return gw, nil
+	return nil
 }
 
 func getGatewayFamilyAddrs(gatewayIfAddrs []*net.IPNet) (string, string) {
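
With newLocalGateway removed, local gateway mode now only contributes the NAT bootstrap above (initLocalGateway), while bridge, OpenFlow and watcher setup happen in the shared newGateway constructor used by both modes. A sketch of the constructor call as the tests below exercise it — the nil arguments (link manager, and the NAD controller when network segmentation is disabled) mirror the test code:

	gw, err := newGateway(nodeName, hostSubnets, gwNextHops, gwIntf, "", ifAddrs,
		nodeAnnotator, &mgmtPortConfig, kube, watchFactory, routeManager,
		nil, nadController, config.GatewayModeLocal)
	if err != nil {
		return err
	}
	if err := gw.initFunc(); err != nil { // tests invoke initFunc directly before Init
		return err
	}
	if err := gw.Init(stop, wg); err != nil {
		return err
	}
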
"github.com/onsi/gomega" + "github.com/urfave/cli/v2" + "github.com/vishvananda/netlink" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" - nodeipt "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/iptables" + nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/mocks" - "github.com/urfave/cli/v2" - "github.com/vishvananda/netlink" - "github.com/coreos/go-iptables/iptables" kapi "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" discovery "k8s.io/api/discovery/v1" @@ -27,6 +28,7 @@ import ( k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/cache" + "sigs.k8s.io/knftables" ) const ( @@ -53,15 +55,23 @@ func initFakeNodePortWatcher(iptV4, iptV6 util.IPTablesHelper) *nodePortWatcher gwMACParsed, _ := net.ParseMAC(gwMAC) + defaultNetConfig := &bridgeUDNConfiguration{ + ofPortPatch: "patch-breth0_ov", + } + fNPW := nodePortWatcher{ ofportPhys: "eth0", - ofportPatch: "patch-breth0_ov", gatewayIPv4: v4localnetGatewayIP, gatewayIPv6: v6localnetGatewayIP, serviceInfo: make(map[k8stypes.NamespacedName]*serviceConfig), ofm: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: &bridgeConfiguration{macAddress: gwMACParsed}, + flowCache: map[string][]string{}, + defaultBridge: &bridgeConfiguration{ + macAddress: gwMACParsed, + netConfig: map[string]*bridgeUDNConfiguration{ + types.DefaultNetworkName: defaultNetConfig, + }, + }, }, } return &fNPW @@ -252,6 +262,7 @@ var _ = Describe("Node Operations", func() { fakeOvnNode *FakeOVNNode fExec *ovntest.FakeExec iptV4, iptV6 util.IPTablesHelper + nft *knftables.Fake fNPW *nodePortWatcher fakeMgmtPortConfig managementPortConfig netlinkMock *mocks.NetLinkOps @@ -275,11 +286,11 @@ var _ = Describe("Node Operations", func() { }) iptV4, iptV6 = util.SetFakeIPTablesHelpers() + nft = nodenft.SetFakeNFTablesHelper() _, nodeNet, err := net.ParseCIDR("10.1.1.0/24") Expect(err).NotTo(HaveOccurred()) // Make a fake MgmtPortConfig with only the fields we care about fakeMgmtPortIPFamilyConfig := managementPortIPFamilyConfig{ - ipt: nil, allSubnets: nil, ifAddr: nodeNet, gwIP: nodeNet.IP, @@ -288,9 +299,13 @@ var _ = Describe("Node Operations", func() { ifName: fakeNodeName, link: nil, routerMAC: nil, + nft: nft, ipv4: &fakeMgmtPortIPFamilyConfig, ipv6: nil, } + err = setupManagementPortNFTables(&fakeMgmtPortConfig) + Expect(err).NotTo(HaveOccurred()) + fNPW = initFakeNodePortWatcher(iptV4, iptV6) }) @@ -300,7 +315,7 @@ var _ = Describe("Node Operations", func() { }) Context("on startup", func() { - It("removes stale iptables rules while keeping remaining intact", func() { + It("removes stale iptables/nftables rules while keeping remaining intact", func() { app.Action = func(ctx *cli.Context) error { externalIP := "1.1.1.1" externalIPPort := int32(8032) @@ -339,8 +354,20 @@ var _ = Describe("Node Operations", func() { Expect(insertIptRules(fakeRules)).To(Succeed()) // Inject rules into SNAT MGMT chain that shouldn't exist and should be cleared on a restore, even if the chain has no rules - fakeRule := getSkipMgmtSNATRule("TCP", "1337", 
"8.8.8.8", iptables.ProtocolIPv4) - Expect(insertIptRules([]nodeipt.Rule{fakeRule})).To(Succeed()) + tx := nft.NewTransaction() + tx.Add(&knftables.Chain{ + Name: nftablesMgmtPortChain, + Comment: knftables.PtrTo("OVN SNAT to Management Port"), + + Type: knftables.PtrTo(knftables.NATType), + Hook: knftables.PtrTo(knftables.PostroutingHook), + Priority: knftables.PtrTo(knftables.SNATPriority), + }) + tx.Add(&knftables.Rule{ + Chain: nftablesMgmtPortChain, + Rule: "blah blah blah", + }) + Expect(nft.Run(context.Background(), tx)).To(Succeed()) expectedTables := map[string]util.FakeTable{ "nat": { @@ -348,9 +375,6 @@ var _ = Describe("Node Operations", func() { fmt.Sprintf("-p UDP -d 10.10.10.10 --dport 27000 -j DNAT --to-destination 172.32.0.12:27000"), fmt.Sprintf("-p %s -d %s --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, externalIP, service.Spec.Ports[0].Port, service.Spec.ClusterIP, service.Spec.Ports[0].Port), }, - iptableMgmPortChain: []string{ - fmt.Sprintf("-p TCP -d 8.8.8.8 --dport 1337 -j RETURN"), - }, }, "filter": {}, "mangle": {}, @@ -360,6 +384,10 @@ var _ = Describe("Node Operations", func() { err := f4.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) + expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName) + "\nadd rule inet ovn-kubernetes mgmtport-snat blah blah blah\n" + err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) + Expect(err).NotTo(HaveOccurred()) + fakeOvnNode.start(ctx, &v1.ServiceList{ Items: []v1.Service{ @@ -371,6 +399,7 @@ var _ = Describe("Node Operations", func() { fNPW.watchFactory = fakeOvnNode.watcher Expect(startNodePortWatcher(fNPW, fakeOvnNode.fakeClient, &fakeMgmtPortConfig)).To(Succeed()) Expect(fakeOvnNode.fakeExec.CalledMatchesExpected()).To(BeTrue(), fExec.ErrorDesc) + Expect(setupManagementPortNFTables(&fakeMgmtPortConfig)).To(Succeed()) expectedTables = map[string]util.FakeTable{ "nat": { @@ -391,10 +420,9 @@ var _ = Describe("Node Operations", func() { "OVN-KUBE-EXTERNALIP": []string{ fmt.Sprintf("-p %s -d %s --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, externalIP, service.Spec.Ports[0].Port, service.Spec.ClusterIP, service.Spec.Ports[0].Port), }, - "OVN-KUBE-SNAT-MGMTPORT": []string{}, - "OVN-KUBE-ETP": []string{}, - "OVN-KUBE-ITP": []string{}, - "OVN-KUBE-EGRESS-SVC": []string{}, + "OVN-KUBE-ETP": []string{}, + "OVN-KUBE-ITP": []string{}, + "OVN-KUBE-EGRESS-SVC": []string{}, }, "filter": {}, "mangle": { @@ -409,6 +437,10 @@ var _ = Describe("Node Operations", func() { err = f4.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) + expectedNFT = getBaseNFTRules(fakeMgmtPortConfig.ifName) + err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) + Expect(err).NotTo(HaveOccurred()) + return nil } err := app.Run([]string{app.Name}) @@ -468,10 +500,9 @@ var _ = Describe("Node Operations", func() { "OVN-KUBE-EXTERNALIP": []string{ fmt.Sprintf("-p %s -d %s --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, externalIP, service.Spec.Ports[0].Port, service.Spec.ClusterIP, service.Spec.Ports[0].Port), }, - "OVN-KUBE-SNAT-MGMTPORT": []string{}, - "OVN-KUBE-ETP": []string{}, - "OVN-KUBE-ITP": []string{}, - "OVN-KUBE-EGRESS-SVC": []string{}, + "OVN-KUBE-ETP": []string{}, + "OVN-KUBE-ITP": []string{}, + "OVN-KUBE-EGRESS-SVC": []string{}, }, "filter": {}, "mangle": { @@ -485,6 +516,10 @@ var _ = Describe("Node Operations", func() { f4 := iptV4.(*util.FakeIPTables) err = f4.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) + + 
expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName) + err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) + return nil } err := app.Run([]string{app.Name}) @@ -546,11 +581,10 @@ var _ = Describe("Node Operations", func() { "OVN-KUBE-NODEPORT": []string{ fmt.Sprintf("-p %s -m addrtype --dst-type LOCAL --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, service.Spec.Ports[0].NodePort, service.Spec.ClusterIP, service.Spec.Ports[0].Port), }, - "OVN-KUBE-EXTERNALIP": []string{}, - "OVN-KUBE-SNAT-MGMTPORT": []string{}, - "OVN-KUBE-ETP": []string{}, - "OVN-KUBE-ITP": []string{}, - "OVN-KUBE-EGRESS-SVC": []string{}, + "OVN-KUBE-EXTERNALIP": []string{}, + "OVN-KUBE-ETP": []string{}, + "OVN-KUBE-ITP": []string{}, + "OVN-KUBE-EGRESS-SVC": []string{}, }, "filter": {}, "mangle": { @@ -564,6 +598,10 @@ var _ = Describe("Node Operations", func() { f4 := iptV4.(*util.FakeIPTables) err = f4.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) + + expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName) + err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) + return nil } err := app.Run([]string{app.Name}) @@ -636,9 +674,6 @@ var _ = Describe("Node Operations", func() { fmt.Sprintf("-p %s -m addrtype --dst-type LOCAL --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, service.Spec.Ports[0].NodePort, service.Spec.ClusterIP, service.Spec.Ports[0].Port), }, "OVN-KUBE-EXTERNALIP": []string{}, - "OVN-KUBE-SNAT-MGMTPORT": []string{ - fmt.Sprintf("-p TCP --dport %v -j RETURN", service.Spec.Ports[0].NodePort), - }, "OVN-KUBE-ETP": []string{ fmt.Sprintf("-p %s -m addrtype --dst-type LOCAL --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, service.Spec.Ports[0].NodePort, config.Gateway.MasqueradeIPs.V4HostETPLocalMasqueradeIP.String(), service.Spec.Ports[0].NodePort), }, @@ -657,8 +692,15 @@ var _ = Describe("Node Operations", func() { f4 := iptV4.(*util.FakeIPTables) err = f4.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) + + expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName) + expectedNFT += fmt.Sprintf("add element inet ovn-kubernetes mgmtport-no-snat-nodeports { tcp . 
%v }\n", service.Spec.Ports[0].NodePort) + err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) + Expect(err).NotTo(HaveOccurred()) + flows := fNPW.ofm.flowCache["NodePort_namespace1_service1_tcp_31111"] Expect(flows).To(BeNil()) + return nil } err := app.Run([]string{app.Name}) @@ -735,10 +777,9 @@ var _ = Describe("Node Operations", func() { fmt.Sprintf("-p %s -d %s --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, service.Status.LoadBalancer.Ingress[0].IP, service.Spec.Ports[0].Port, service.Spec.ClusterIP, service.Spec.Ports[0].Port), fmt.Sprintf("-p %s -d %s --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, externalIP, service.Spec.Ports[0].Port, service.Spec.ClusterIP, service.Spec.Ports[0].Port), }, - "OVN-KUBE-SNAT-MGMTPORT": []string{}, - "OVN-KUBE-ETP": []string{}, - "OVN-KUBE-ITP": []string{}, - "OVN-KUBE-EGRESS-SVC": []string{}, + "OVN-KUBE-ETP": []string{}, + "OVN-KUBE-ITP": []string{}, + "OVN-KUBE-EGRESS-SVC": []string{}, }, "filter": {}, "mangle": { @@ -752,6 +793,10 @@ var _ = Describe("Node Operations", func() { f4 := iptV4.(*util.FakeIPTables) err = f4.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) + + expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName) + err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) + return nil } err := app.Run([]string{app.Name}) @@ -829,9 +874,6 @@ var _ = Describe("Node Operations", func() { "OVN-KUBE-NODEPORT": []string{ fmt.Sprintf("-p %s -m addrtype --dst-type LOCAL --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, service.Spec.Ports[0].NodePort, service.Spec.ClusterIP, service.Spec.Ports[0].Port), }, - "OVN-KUBE-SNAT-MGMTPORT": []string{ - fmt.Sprintf("-p TCP --dport %v -j RETURN", service.Spec.Ports[0].NodePort), - }, "OVN-KUBE-EXTERNALIP": []string{ fmt.Sprintf("-p %s -d %s --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, service.Status.LoadBalancer.Ingress[0].IP, service.Spec.Ports[0].Port, service.Spec.ClusterIP, service.Spec.Ports[0].Port), fmt.Sprintf("-p %s -d %s --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, externalIP, service.Spec.Ports[0].Port, service.Spec.ClusterIP, service.Spec.Ports[0].Port), @@ -862,6 +904,12 @@ var _ = Describe("Node Operations", func() { f4 := iptV4.(*util.FakeIPTables) err = f4.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) + + expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName) + expectedNFT += fmt.Sprintf("add element inet ovn-kubernetes mgmtport-no-snat-nodeports { tcp . 
%v }\n", service.Spec.Ports[0].NodePort) + err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) + Expect(err).NotTo(HaveOccurred()) + flows := fNPW.ofm.flowCache["NodePort_namespace1_service1_tcp_31111"] Expect(flows).To(BeNil()) flows = fNPW.ofm.flowCache["Ingress_namespace1_service1_5.5.5.5_8080"] @@ -966,10 +1014,6 @@ var _ = Describe("Node Operations", func() { "-j OVN-KUBE-EGRESS-SVC", }, "OVN-KUBE-NODEPORT": []string{}, - "OVN-KUBE-SNAT-MGMTPORT": []string{ - fmt.Sprintf("-p TCP -d %s --dport %d -j RETURN", ep1.Addresses[0], int32(service.Spec.Ports[0].TargetPort.IntValue())), - fmt.Sprintf("-p TCP -d %s --dport %d -j RETURN", ep2.Addresses[0], int32(service.Spec.Ports[0].TargetPort.IntValue())), - }, "OVN-KUBE-EXTERNALIP": []string{ fmt.Sprintf("-p %s -d %s --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, service.Status.LoadBalancer.Ingress[0].IP, service.Spec.Ports[0].Port, service.Spec.ClusterIP, service.Spec.Ports[0].Port), fmt.Sprintf("-p %s -d %s --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, externalIP, service.Spec.Ports[0].Port, service.Spec.ClusterIP, service.Spec.Ports[0].Port), @@ -1000,6 +1044,13 @@ var _ = Describe("Node Operations", func() { f4 := iptV4.(*util.FakeIPTables) Expect(f4.MatchState(expectedTables, nil)).To(Succeed()) + + expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName) + expectedNFT += fmt.Sprintf("add element inet ovn-kubernetes mgmtport-no-snat-services-v4 { %s . tcp . %d }\n", ep1.Addresses[0], int32(service.Spec.Ports[0].TargetPort.IntValue())) + expectedNFT += fmt.Sprintf("add element inet ovn-kubernetes mgmtport-no-snat-services-v4 { %s . tcp . %d }\n", ep2.Addresses[0], int32(service.Spec.Ports[0].TargetPort.IntValue())) + err := nodenft.MatchNFTRules(expectedNFT, nft.Dump()) + Expect(err).NotTo(HaveOccurred()) + Expect(fNPW.ofm.flowCache["Ingress_namespace1_service1_5.5.5.5_80"]).To(Equal(expectedLBIngressFlows)) Expect(fNPW.ofm.flowCache["External_namespace1_service1_1.1.1.1_80"]).To(Equal(expectedLBExternalIPFlows)) return nil @@ -1078,7 +1129,6 @@ var _ = Describe("Node Operations", func() { "OVN-KUBE-NODEPORT": []string{ fmt.Sprintf("-p %s -m addrtype --dst-type LOCAL --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, service.Spec.Ports[0].NodePort, service.Spec.ClusterIP, service.Spec.Ports[0].Port), }, - "OVN-KUBE-SNAT-MGMTPORT": []string{}, "OVN-KUBE-EXTERNALIP": []string{ fmt.Sprintf("-p %s -d %s --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, service.Status.LoadBalancer.Ingress[0].IP, service.Spec.Ports[0].Port, service.Spec.ClusterIP, service.Spec.Ports[0].Port), fmt.Sprintf("-p %s -d %s --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, externalIP, service.Spec.Ports[0].Port, service.Spec.ClusterIP, service.Spec.Ports[0].Port), @@ -1106,6 +1156,10 @@ var _ = Describe("Node Operations", func() { f4 := iptV4.(*util.FakeIPTables) err = f4.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) + + expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName) + err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) + flows := fNPW.ofm.flowCache["NodePort_namespace1_service1_tcp_31111"] Expect(flows).To(BeNil()) flows = fNPW.ofm.flowCache["Ingress_namespace1_service1_5.5.5.5_8080"] @@ -1190,9 +1244,6 @@ var _ = Describe("Node Operations", func() { "OVN-KUBE-NODEPORT": []string{ fmt.Sprintf("-p %s -m addrtype --dst-type LOCAL --dport %v -j DNAT --to-destination %s:%v", 
 					service.Spec.Ports[0].Protocol, service.Spec.Ports[0].NodePort, service.Spec.ClusterIP, service.Spec.Ports[0].Port),
 				},
-				"OVN-KUBE-SNAT-MGMTPORT": []string{
-					fmt.Sprintf("-p TCP --dport %v -j RETURN", service.Spec.Ports[0].NodePort),
-				},
 				"OVN-KUBE-EXTERNALIP": []string{
 					fmt.Sprintf("-p %s -d %s --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, service.Status.LoadBalancer.Ingress[0].IP, service.Spec.Ports[0].Port, service.Spec.ClusterIP, service.Spec.Ports[0].Port),
 					fmt.Sprintf("-p %s -d %s --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, externalIP, service.Spec.Ports[0].Port, service.Spec.ClusterIP, service.Spec.Ports[0].Port),
@@ -1235,6 +1286,12 @@ var _ = Describe("Node Operations", func() {
 				f4 := iptV4.(*util.FakeIPTables)
 				err = f4.MatchState(expectedTables, nil)
 				Expect(err).NotTo(HaveOccurred())
+
+				expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName)
+				expectedNFT += fmt.Sprintf("add element inet ovn-kubernetes mgmtport-no-snat-nodeports { tcp . %v }\n", service.Spec.Ports[0].NodePort)
+				err = nodenft.MatchNFTRules(expectedNFT, nft.Dump())
+				Expect(err).NotTo(HaveOccurred())
+
 				flows := fNPW.ofm.flowCache["NodePort_namespace1_service1_tcp_31111"]
 				Expect(flows).To(Equal(expectedNodePortFlows))
 				flows = fNPW.ofm.flowCache["Ingress_namespace1_service1_5.5.5.5_8080"]
@@ -1307,11 +1364,10 @@ var _ = Describe("Node Operations", func() {
 				"OVN-KUBE-NODEPORT": []string{
 					fmt.Sprintf("-p %s -m addrtype --dst-type LOCAL --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, service.Spec.Ports[0].NodePort, service.Spec.ClusterIPs[0], service.Spec.Ports[0].Port),
 				},
-				"OVN-KUBE-EXTERNALIP":    []string{},
-				"OVN-KUBE-SNAT-MGMTPORT": []string{},
-				"OVN-KUBE-ETP":           []string{},
-				"OVN-KUBE-ITP":           []string{},
-				"OVN-KUBE-EGRESS-SVC":    []string{},
+				"OVN-KUBE-EXTERNALIP": []string{},
+				"OVN-KUBE-ETP":        []string{},
+				"OVN-KUBE-ITP":        []string{},
+				"OVN-KUBE-EGRESS-SVC": []string{},
 			},
 			"filter": {},
 			"mangle": {
@@ -1338,6 +1394,11 @@ var _ = Describe("Node Operations", func() {
 				f6 := iptV6.(*util.FakeIPTables)
 				err = f6.MatchState(expectedTables6, nil)
 				Expect(err).NotTo(HaveOccurred())
+
+				expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName)
+				err = nodenft.MatchNFTRules(expectedNFT, nft.Dump())
+				Expect(err).NotTo(HaveOccurred())
+
 				return nil
 			}
 			err := app.Run([]string{app.Name})
@@ -1403,11 +1463,10 @@ var _ = Describe("Node Operations", func() {
 				"OVN-KUBE-EXTERNALIP": []string{
 					fmt.Sprintf("-p %s -d %s --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, externalIPv4, service.Spec.Ports[0].Port, clusterIPv4, service.Spec.Ports[0].Port),
 				},
-				"OVN-KUBE-NODEPORT":      []string{},
-				"OVN-KUBE-SNAT-MGMTPORT": []string{},
-				"OVN-KUBE-ETP":           []string{},
-				"OVN-KUBE-ITP":           []string{},
-				"OVN-KUBE-EGRESS-SVC":    []string{},
+				"OVN-KUBE-NODEPORT":   []string{},
+				"OVN-KUBE-ETP":        []string{},
+				"OVN-KUBE-ITP":        []string{},
+				"OVN-KUBE-EGRESS-SVC": []string{},
 			},
 			"filter": {},
 			"mangle": {
@@ -1435,6 +1494,11 @@ var _ = Describe("Node Operations", func() {
 				f6 := iptV6.(*util.FakeIPTables)
 				err = f6.MatchState(expectedTables6, nil)
 				Expect(err).NotTo(HaveOccurred())
+
+				expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName)
+				err = nodenft.MatchNFTRules(expectedNFT, nft.Dump())
+				Expect(err).NotTo(HaveOccurred())
+
 				return nil
 			}
 			err := app.Run([]string{app.Name})
@@ -1495,12 +1558,11 @@ var _ = Describe("Node Operations", func() {
 				"POSTROUTING": []string{
 					"-j OVN-KUBE-EGRESS-SVC",
 				},
-				"OVN-KUBE-NODEPORT":      []string{},
-				"OVN-KUBE-EXTERNALIP":    []string{},
-				"OVN-KUBE-SNAT-MGMTPORT": []string{},
-				"OVN-KUBE-ETP":           []string{},
-				"OVN-KUBE-ITP":           []string{},
-				"OVN-KUBE-EGRESS-SVC":    []string{},
+				"OVN-KUBE-NODEPORT":   []string{},
+				"OVN-KUBE-EXTERNALIP": []string{},
+				"OVN-KUBE-ETP":        []string{},
+				"OVN-KUBE-ITP":        []string{},
+				"OVN-KUBE-EGRESS-SVC": []string{},
 			},
 			"filter": {},
 			"mangle": {
@@ -1514,6 +1576,7 @@ var _ = Describe("Node Operations", func() {
 				f4 := iptV4.(*util.FakeIPTables)
 				err = f4.MatchState(expectedTables, nil)
 				Expect(err).NotTo(HaveOccurred())
+
 				expectedTables = map[string]util.FakeTable{
 					"nat":    {},
 					"filter": {},
@@ -1522,6 +1585,11 @@ var _ = Describe("Node Operations", func() {
 				f6 := iptV6.(*util.FakeIPTables)
 				err = f6.MatchState(expectedTables, nil)
 				Expect(err).NotTo(HaveOccurred())
+
+				expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName)
+				err = nodenft.MatchNFTRules(expectedNFT, nft.Dump())
+				Expect(err).NotTo(HaveOccurred())
+
 				return nil
 			}
 			err := app.Run([]string{app.Name})
@@ -1576,12 +1643,11 @@ var _ = Describe("Node Operations", func() {
 				"POSTROUTING": []string{
 					"-j OVN-KUBE-EGRESS-SVC",
 				},
-				"OVN-KUBE-NODEPORT":      []string{},
-				"OVN-KUBE-EXTERNALIP":    []string{},
-				"OVN-KUBE-SNAT-MGMTPORT": []string{},
-				"OVN-KUBE-ETP":           []string{},
-				"OVN-KUBE-ITP":           []string{},
-				"OVN-KUBE-EGRESS-SVC":    []string{},
+				"OVN-KUBE-NODEPORT":   []string{},
+				"OVN-KUBE-EXTERNALIP": []string{},
+				"OVN-KUBE-ETP":        []string{},
+				"OVN-KUBE-ITP":        []string{},
+				"OVN-KUBE-EGRESS-SVC": []string{},
 			},
 			"filter": {},
 			"mangle": {
@@ -1605,6 +1671,11 @@ var _ = Describe("Node Operations", func() {
 				f6 := iptV6.(*util.FakeIPTables)
 				err = f6.MatchState(expectedTables, nil)
 				Expect(err).NotTo(HaveOccurred())
+
+				expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName)
+				err = nodenft.MatchNFTRules(expectedNFT, nft.Dump())
+				Expect(err).NotTo(HaveOccurred())
+
 				return nil
 			}
 			err := app.Run([]string{app.Name})
@@ -1667,11 +1737,10 @@ var _ = Describe("Node Operations", func() {
 					fmt.Sprintf("-p %s -d %s --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, externalIP, service.Spec.Ports[0].Port, service.Spec.ClusterIP, service.Spec.Ports[0].Port),
 				},
-				"OVN-KUBE-NODEPORT":      []string{},
-				"OVN-KUBE-SNAT-MGMTPORT": []string{},
-				"OVN-KUBE-ETP":           []string{},
-				"OVN-KUBE-ITP":           []string{},
-				"OVN-KUBE-EGRESS-SVC":    []string{},
+				"OVN-KUBE-NODEPORT":   []string{},
+				"OVN-KUBE-ETP":        []string{},
+				"OVN-KUBE-ITP":        []string{},
+				"OVN-KUBE-EGRESS-SVC": []string{},
 			},
 			"filter": {},
 			"mangle": {
@@ -1686,6 +1755,10 @@ var _ = Describe("Node Operations", func() {
 				err = f4.MatchState(expectedTables, nil)
 				Expect(err).NotTo(HaveOccurred())
 
+				expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName)
+				err = nodenft.MatchNFTRules(expectedNFT, nft.Dump())
+				Expect(err).NotTo(HaveOccurred())
+
 				addConntrackMocks(netlinkMock, []ctFilterDesc{{"10.10.10.1", 8034}, {"10.129.0.2", 8034}})
 				err = fNPW.DeleteService(&service)
 				Expect(err).NotTo(HaveOccurred())
@@ -1707,10 +1779,9 @@ var _ = Describe("Node Operations", func() {
 				"POSTROUTING": []string{
 					"-j OVN-KUBE-EGRESS-SVC",
 				},
-				"OVN-KUBE-SNAT-MGMTPORT": []string{},
-				"OVN-KUBE-ETP":           []string{},
-				"OVN-KUBE-ITP":           []string{},
-				"OVN-KUBE-EGRESS-SVC":    []string{},
+				"OVN-KUBE-ETP":        []string{},
+				"OVN-KUBE-ITP":        []string{},
+				"OVN-KUBE-EGRESS-SVC": []string{},
 			},
 			"filter": {},
 			"mangle": {
@@ -1724,6 +1795,11 @@ var _ = Describe("Node Operations", func() {
 				f4 = iptV4.(*util.FakeIPTables)
 				err = f4.MatchState(expectedTables, nil)
 				Expect(err).NotTo(HaveOccurred())
+
+				expectedNFT = getBaseNFTRules(fakeMgmtPortConfig.ifName)
+				err = nodenft.MatchNFTRules(expectedNFT, nft.Dump())
+				Expect(err).NotTo(HaveOccurred())
+
 				return nil
 			}
 			err := app.Run([]string{app.Name})
@@ -1996,11 +2071,10 @@ var _ = Describe("Node Operations", func() {
 				"OVN-KUBE-NODEPORT": []string{
 					fmt.Sprintf("-p %s -m addrtype --dst-type LOCAL --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, nodePort, service.Spec.ClusterIP, service.Spec.Ports[0].Port),
 				},
-				"OVN-KUBE-EXTERNALIP":    []string{},
-				"OVN-KUBE-SNAT-MGMTPORT": []string{},
-				"OVN-KUBE-ETP":           []string{},
-				"OVN-KUBE-ITP":           []string{},
-				"OVN-KUBE-EGRESS-SVC":    []string{},
+				"OVN-KUBE-EXTERNALIP": []string{},
+				"OVN-KUBE-ETP":        []string{},
+				"OVN-KUBE-ITP":        []string{},
+				"OVN-KUBE-EGRESS-SVC": []string{},
 			},
 			"filter": {},
 			"mangle": {
@@ -2015,6 +2089,10 @@ var _ = Describe("Node Operations", func() {
 				err = f4.MatchState(expectedTables, nil)
 				Expect(err).NotTo(HaveOccurred())
 
+				expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName)
+				err = nodenft.MatchNFTRules(expectedNFT, nft.Dump())
+				Expect(err).NotTo(HaveOccurred())
+
 				addConntrackMocks(netlinkMock, []ctFilterDesc{{"10.129.0.2", 8080}, {"192.168.18.15", 38034}})
 				err = fNPW.DeleteService(&service)
 				Expect(err).NotTo(HaveOccurred())
@@ -2036,10 +2113,9 @@ var _ = Describe("Node Operations", func() {
 				"POSTROUTING": []string{
 					"-j OVN-KUBE-EGRESS-SVC",
 				},
-				"OVN-KUBE-SNAT-MGMTPORT": []string{},
-				"OVN-KUBE-ETP":           []string{},
-				"OVN-KUBE-ITP":           []string{},
-				"OVN-KUBE-EGRESS-SVC":    []string{},
+				"OVN-KUBE-ETP":        []string{},
+				"OVN-KUBE-ITP":        []string{},
+				"OVN-KUBE-EGRESS-SVC": []string{},
 			},
 			"filter": {},
 			"mangle": {
@@ -2054,6 +2130,10 @@ var _ = Describe("Node Operations", func() {
 				err = f4.MatchState(expectedTables, nil)
 				Expect(err).NotTo(HaveOccurred())
 
+				expectedNFT = getBaseNFTRules(fakeMgmtPortConfig.ifName)
+				err = nodenft.MatchNFTRules(expectedNFT, nft.Dump())
+				Expect(err).NotTo(HaveOccurred())
+
 				return nil
 			}
 			err := app.Run([]string{app.Name})
@@ -2126,9 +2205,6 @@ var _ = Describe("Node Operations", func() {
 					fmt.Sprintf("-p %s -m addrtype --dst-type LOCAL --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, service.Spec.Ports[0].NodePort, service.Spec.ClusterIP, service.Spec.Ports[0].Port),
 				},
 				"OVN-KUBE-EXTERNALIP": []string{},
-				"OVN-KUBE-SNAT-MGMTPORT": []string{
-					fmt.Sprintf("-p TCP --dport %v -j RETURN", service.Spec.Ports[0].NodePort),
-				},
 				"OVN-KUBE-ETP": []string{
 					fmt.Sprintf("-p %s -m addrtype --dst-type LOCAL --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, service.Spec.Ports[0].NodePort, config.Gateway.MasqueradeIPs.V4HostETPLocalMasqueradeIP.String(), service.Spec.Ports[0].NodePort),
 				},
@@ -2146,7 +2222,13 @@ var _ = Describe("Node Operations", func() {
 				f4 := iptV4.(*util.FakeIPTables)
 				err = f4.MatchState(expectedTables, nil)
+				Expect(err).NotTo(HaveOccurred())
+
+				expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName)
+				expectedNFT += fmt.Sprintf("add element inet ovn-kubernetes mgmtport-no-snat-nodeports { tcp . %v }\n", service.Spec.Ports[0].NodePort)
+				err = nodenft.MatchNFTRules(expectedNFT, nft.Dump())
 				Expect(err).NotTo(HaveOccurred())
+
 				flows := fNPW.ofm.flowCache["NodePort_namespace1_service1_tcp_31111"]
 				Expect(flows).To(BeNil())
%v }\n", service.Spec.Ports[0].NodePort) + err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) Expect(err).NotTo(HaveOccurred()) + flows := fNPW.ofm.flowCache["NodePort_namespace1_service1_tcp_31111"] Expect(flows).To(BeNil()) @@ -2171,10 +2252,9 @@ var _ = Describe("Node Operations", func() { "POSTROUTING": []string{ "-j OVN-KUBE-EGRESS-SVC", }, - "OVN-KUBE-SNAT-MGMTPORT": []string{}, - "OVN-KUBE-ETP": []string{}, - "OVN-KUBE-ITP": []string{}, - "OVN-KUBE-EGRESS-SVC": []string{}, + "OVN-KUBE-ETP": []string{}, + "OVN-KUBE-ITP": []string{}, + "OVN-KUBE-EGRESS-SVC": []string{}, }, "filter": {}, "mangle": { @@ -2189,6 +2269,9 @@ var _ = Describe("Node Operations", func() { err = f4.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) + expectedNFT = getBaseNFTRules(fakeMgmtPortConfig.ifName) + err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) + flows = fNPW.ofm.flowCache["NodePort_namespace1_service1_tcp_31111"] Expect(flows).To(BeNil()) @@ -2265,9 +2348,6 @@ var _ = Describe("Node Operations", func() { fmt.Sprintf("-p %s -m addrtype --dst-type LOCAL --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, service.Spec.Ports[0].NodePort, service.Spec.ClusterIP, service.Spec.Ports[0].Port), }, "OVN-KUBE-EXTERNALIP": []string{}, - "OVN-KUBE-SNAT-MGMTPORT": []string{ - fmt.Sprintf("-p TCP --dport %v -j RETURN", service.Spec.Ports[0].NodePort), - }, "OVN-KUBE-ETP": []string{}, "OVN-KUBE-ITP": []string{}, "OVN-KUBE-EGRESS-SVC": []string{}, @@ -2290,6 +2370,12 @@ var _ = Describe("Node Operations", func() { f4 := iptV4.(*util.FakeIPTables) err = f4.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) + + expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName) + expectedNFT += fmt.Sprintf("add element inet ovn-kubernetes mgmtport-no-snat-nodeports { tcp . 
%v }\n", service.Spec.Ports[0].NodePort) + err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) + Expect(err).NotTo(HaveOccurred()) + flows := fNPW.ofm.flowCache["NodePort_namespace1_service1_tcp_31111"] Expect(flows).To(Equal(expectedFlows)) @@ -2314,10 +2400,9 @@ var _ = Describe("Node Operations", func() { "POSTROUTING": []string{ "-j OVN-KUBE-EGRESS-SVC", }, - "OVN-KUBE-SNAT-MGMTPORT": []string{}, - "OVN-KUBE-ETP": []string{}, - "OVN-KUBE-ITP": []string{}, - "OVN-KUBE-EGRESS-SVC": []string{}, + "OVN-KUBE-ETP": []string{}, + "OVN-KUBE-ITP": []string{}, + "OVN-KUBE-EGRESS-SVC": []string{}, }, "filter": {}, "mangle": { @@ -2332,6 +2417,9 @@ var _ = Describe("Node Operations", func() { err = f4.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) + expectedNFT = getBaseNFTRules(fakeMgmtPortConfig.ifName) + err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) + flows = fNPW.ofm.flowCache["NodePort_namespace1_service1_tcp_31111"] Expect(flows).To(BeNil()) @@ -2413,11 +2501,10 @@ var _ = Describe("Node Operations", func() { "OVN-KUBE-NODEPORT": []string{ fmt.Sprintf("-p %s -m addrtype --dst-type LOCAL --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, service.Spec.Ports[0].NodePort, service.Spec.ClusterIP, service.Spec.Ports[0].Port), }, - "OVN-KUBE-EXTERNALIP": []string{}, - "OVN-KUBE-SNAT-MGMTPORT": []string{}, - "OVN-KUBE-ETP": []string{}, - "OVN-KUBE-ITP": []string{}, - "OVN-KUBE-EGRESS-SVC": []string{}, + "OVN-KUBE-EXTERNALIP": []string{}, + "OVN-KUBE-ETP": []string{}, + "OVN-KUBE-ITP": []string{}, + "OVN-KUBE-EGRESS-SVC": []string{}, }, "filter": {}, "mangle": { @@ -2437,6 +2524,10 @@ var _ = Describe("Node Operations", func() { f4 := iptV4.(*util.FakeIPTables) err = f4.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) + + expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName) + err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) + flows := fNPW.ofm.flowCache["NodePort_namespace1_service1_tcp_31111"] Expect(flows).To(Equal(expectedFlows)) @@ -2461,10 +2552,9 @@ var _ = Describe("Node Operations", func() { "POSTROUTING": []string{ "-j OVN-KUBE-EGRESS-SVC", }, - "OVN-KUBE-SNAT-MGMTPORT": []string{}, - "OVN-KUBE-ITP": []string{}, - "OVN-KUBE-ETP": []string{}, - "OVN-KUBE-EGRESS-SVC": []string{}, + "OVN-KUBE-ITP": []string{}, + "OVN-KUBE-ETP": []string{}, + "OVN-KUBE-EGRESS-SVC": []string{}, }, "filter": {}, "mangle": { @@ -2479,6 +2569,9 @@ var _ = Describe("Node Operations", func() { err = f4.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) + expectedNFT = getBaseNFTRules(fakeMgmtPortConfig.ifName) + err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) + flows = fNPW.ofm.flowCache["NodePort_namespace1_service1_tcp_31111"] Expect(flows).To(BeNil()) @@ -2554,9 +2647,6 @@ var _ = Describe("Node Operations", func() { fmt.Sprintf("-p %s -m addrtype --dst-type LOCAL --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, service.Spec.Ports[0].NodePort, service.Spec.ClusterIP, service.Spec.Ports[0].Port), }, "OVN-KUBE-EXTERNALIP": []string{}, - "OVN-KUBE-SNAT-MGMTPORT": []string{ - fmt.Sprintf("-p TCP --dport %v -j RETURN", service.Spec.Ports[0].NodePort), - }, "OVN-KUBE-ITP": []string{}, "OVN-KUBE-ETP": []string{}, "OVN-KUBE-EGRESS-SVC": []string{}, @@ -2581,6 +2671,12 @@ var _ = Describe("Node Operations", func() { f4 := iptV4.(*util.FakeIPTables) err = f4.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) + + expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName) + expectedNFT += 
fmt.Sprintf("add element inet ovn-kubernetes mgmtport-no-snat-nodeports { tcp . %v }\n", service.Spec.Ports[0].NodePort) + err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) + Expect(err).NotTo(HaveOccurred()) + flows := fNPW.ofm.flowCache["NodePort_namespace1_service1_tcp_31111"] Expect(flows).To(Equal(expectedFlows)) @@ -2606,9 +2702,8 @@ var _ = Describe("Node Operations", func() { "POSTROUTING": []string{ "-j OVN-KUBE-EGRESS-SVC", }, - "OVN-KUBE-SNAT-MGMTPORT": []string{}, - "OVN-KUBE-ETP": []string{}, - "OVN-KUBE-EGRESS-SVC": []string{}, + "OVN-KUBE-ETP": []string{}, + "OVN-KUBE-EGRESS-SVC": []string{}, }, "filter": {}, "mangle": { @@ -2623,6 +2718,9 @@ var _ = Describe("Node Operations", func() { err = f4.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) + expectedNFT = getBaseNFTRules(fakeMgmtPortConfig.ifName) + err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) + flows = fNPW.ofm.flowCache["NodePort_namespace1_service1_tcp_31111"] Expect(flows).To(BeNil()) @@ -2701,8 +2799,7 @@ var _ = Describe("Node Operations", func() { "OVN-KUBE-NODEPORT": []string{ fmt.Sprintf("-p %s -m addrtype --dst-type LOCAL --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, service.Spec.Ports[0].NodePort, service.Spec.ClusterIP, service.Spec.Ports[0].Port), }, - "OVN-KUBE-EXTERNALIP": []string{}, - "OVN-KUBE-SNAT-MGMTPORT": []string{}, + "OVN-KUBE-EXTERNALIP": []string{}, "OVN-KUBE-ITP": []string{ fmt.Sprintf("-p %s -d %s --dport %d -j REDIRECT --to-port %d", service.Spec.Ports[0].Protocol, service.Spec.ClusterIP, service.Spec.Ports[0].Port, int32(service.Spec.Ports[0].TargetPort.IntValue())), }, @@ -2727,6 +2824,10 @@ var _ = Describe("Node Operations", func() { f4 := iptV4.(*util.FakeIPTables) err = f4.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) + + expectedNFT := getBaseNFTRules(fakeMgmtPortConfig.ifName) + err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) + flows := fNPW.ofm.flowCache["NodePort_namespace1_service1_tcp_31111"] Expect(flows).To(Equal(expectedFlows)) @@ -2751,10 +2852,9 @@ var _ = Describe("Node Operations", func() { "POSTROUTING": []string{ "-j OVN-KUBE-EGRESS-SVC", }, - "OVN-KUBE-SNAT-MGMTPORT": []string{}, - "OVN-KUBE-ITP": []string{}, - "OVN-KUBE-ETP": []string{}, - "OVN-KUBE-EGRESS-SVC": []string{}, + "OVN-KUBE-ITP": []string{}, + "OVN-KUBE-ETP": []string{}, + "OVN-KUBE-EGRESS-SVC": []string{}, }, "filter": {}, "mangle": { @@ -2769,6 +2869,9 @@ var _ = Describe("Node Operations", func() { err = f4.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) + expectedNFT = getBaseNFTRules(fakeMgmtPortConfig.ifName) + err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) + flows = fNPW.ofm.flowCache["NodePort_namespace1_service1_tcp_31111"] Expect(flows).To(BeNil()) @@ -2803,10 +2906,9 @@ var _ = Describe("Node Operations", func() { "POSTROUTING": []string{ "-j OVN-KUBE-EGRESS-SVC", }, - "OVN-KUBE-SNAT-MGMTPORT": []string{}, - "OVN-KUBE-ETP": []string{}, - "OVN-KUBE-ITP": []string{}, - "OVN-KUBE-EGRESS-SVC": []string{}, + "OVN-KUBE-ETP": []string{}, + "OVN-KUBE-ITP": []string{}, + "OVN-KUBE-EGRESS-SVC": []string{}, }, "filter": { "FORWARD": []string{ @@ -2863,10 +2965,9 @@ var _ = Describe("Node Operations", func() { "POSTROUTING": []string{ "-j OVN-KUBE-EGRESS-SVC", }, - "OVN-KUBE-SNAT-MGMTPORT": []string{}, - "OVN-KUBE-ETP": []string{}, - "OVN-KUBE-ITP": []string{}, - "OVN-KUBE-EGRESS-SVC": []string{}, + "OVN-KUBE-ETP": []string{}, + "OVN-KUBE-ITP": []string{}, + "OVN-KUBE-EGRESS-SVC": []string{}, }, 
"filter": { "FORWARD": []string{}, diff --git a/go-controller/pkg/node/gateway_nftables.go b/go-controller/pkg/node/gateway_nftables.go new file mode 100644 index 0000000000..8cbc866007 --- /dev/null +++ b/go-controller/pkg/node/gateway_nftables.go @@ -0,0 +1,176 @@ +//go:build linux +// +build linux + +package node + +import ( + "context" + "fmt" + "strings" + + kapi "k8s.io/api/core/v1" + utilnet "k8s.io/utils/net" + "sigs.k8s.io/knftables" + + nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" +) + +// gateway_nftables.go contains code for dealing with nftables rules; it is used in +// conjunction with gateway_iptables.go. +// +// For the most part, using a mix of iptables and nftables rules does not matter, since +// both of them are handled by netfilter. However, in cases where there is a close +// ordering dependency between two rules (especially, in any case where it's necessary to +// use an "accept" rule to override a later "drop" rule), then those rules will need to +// either both be iptables or both be nftables. + +// getNoSNATNodePortRules returns elements to add to the "mgmtport-no-snat-nodeports" +// set to prevent SNAT of sourceIP when passing through the management port, for an +// `externalTrafficPolicy: Local` service with NodePorts. +func getNoSNATNodePortRules(svcPort kapi.ServicePort) []*knftables.Element { + return []*knftables.Element{ + { + Set: nftablesMgmtPortNoSNATNodePorts, + Key: []string{ + strings.ToLower(string(svcPort.Protocol)), + fmt.Sprintf("%d", svcPort.NodePort), + }, + }, + } +} + +// getNoSNATLoadBalancerIPRules returns elements to add to the +// "mgmtport-no-snat-services-v4" and "mgmtport-no-snat-services-v6" sets to prevent SNAT +// of sourceIP when passing through the management port, for an `externalTrafficPolicy: +// Local` service *without* NodePorts. +func getNoSNATLoadBalancerIPRules(svcPort kapi.ServicePort, localEndpoints []string) []*knftables.Element { + var nftRules []*knftables.Element + protocol := strings.ToLower(string(svcPort.Protocol)) + port := fmt.Sprintf("%v", svcPort.TargetPort.IntValue()) + for _, ip := range localEndpoints { + setName := nftablesMgmtPortNoSNATServicesV4 + if utilnet.IsIPv6String(ip) { + setName = nftablesMgmtPortNoSNATServicesV6 + } + + nftRules = append(nftRules, + &knftables.Element{ + Set: setName, + Key: []string{ip, protocol, port}, + }, + ) + } + return nftRules +} + +// getUDNNodePortMarkNFTRule returns a verdict map element (nftablesUDNMarkNodePortsMap) +// with a key composed of the svcPort protocol and port. +// The value is a jump to the UDN chain mark if netInfo is provided, or nil that is useful for map entry removal. +func getUDNNodePortMarkNFTRule(svcPort kapi.ServicePort, netInfo *bridgeUDNConfiguration) *knftables.Element { + var val []string + if netInfo != nil { + val = []string{fmt.Sprintf("jump %s", GetUDNMarkChain(netInfo.pktMark))} + } + return &knftables.Element{ + Map: nftablesUDNMarkNodePortsMap, + Key: []string{strings.ToLower(string(svcPort.Protocol)), fmt.Sprintf("%v", svcPort.NodePort)}, + Value: val, + } + +} + +// getUDNExternalIPsMarkNFTRules returns a verdict map elements (nftablesUDNMarkExternalIPsV4Map or nftablesUDNMarkExternalIPsV6Map) +// with a key composed of the external IP, svcPort protocol and port. +// The value is a jump to the UDN chain mark if netInfo is provided, or nil that is useful for map entry removal. 
+func getUDNExternalIPsMarkNFTRules(svcPort kapi.ServicePort, externalIPs []string, netInfo *bridgeUDNConfiguration) []*knftables.Element { + var nftRules []*knftables.Element + var val []string + + if netInfo != nil { + val = []string{fmt.Sprintf("jump %s", GetUDNMarkChain(netInfo.pktMark))} + } + for _, externalIP := range externalIPs { + mapName := nftablesUDNMarkExternalIPsV4Map + if utilnet.IsIPv6String(externalIP) { + mapName = nftablesUDNMarkExternalIPsV6Map + } + nftRules = append(nftRules, + &knftables.Element{ + Map: mapName, + Key: []string{externalIP, strings.ToLower(string(svcPort.Protocol)), fmt.Sprintf("%v", svcPort.Port)}, + Value: val, + }, + ) + + } + return nftRules +} + +func recreateNFTSet(setName string, keepNFTElems []*knftables.Element) error { + nft, err := nodenft.GetNFTablesHelper() + if err != nil { + return err + } + tx := nft.NewTransaction() + tx.Flush(&knftables.Set{ + Name: setName, + }) + for _, elem := range keepNFTElems { + if elem.Set == setName { + tx.Add(elem) + } + } + return nft.Run(context.TODO(), tx) +} + +func recreateNFTMap(mapName string, keepNFTElems []*knftables.Element) error { + nft, err := nodenft.GetNFTablesHelper() + if err != nil { + return err + } + tx := nft.NewTransaction() + tx.Flush(&knftables.Map{ + Name: mapName, + }) + for _, elem := range keepNFTElems { + if elem.Map == mapName { + tx.Add(elem) + } + } + return nft.Run(context.TODO(), tx) +} + +// getGatewayNFTRules returns nftables rules for service. This must be used in conjunction +// with getGatewayIPTRules. +func getGatewayNFTRules(service *kapi.Service, localEndpoints []string, svcHasLocalHostNetEndPnt bool) []*knftables.Element { + rules := make([]*knftables.Element, 0) + svcTypeIsETPLocal := util.ServiceExternalTrafficPolicyLocal(service) + for _, svcPort := range service.Spec.Ports { + if svcTypeIsETPLocal && !svcHasLocalHostNetEndPnt { + // For `externalTrafficPolicy: Local` services with pod-network + // endpoints, we need to add rules to prevent them from being SNATted + // when entering the management port, to preserve the client IP. + if util.ServiceTypeHasNodePort(service) { + rules = append(rules, getNoSNATNodePortRules(svcPort)...) + } else if len(util.GetExternalAndLBIPs(service)) > 0 { + rules = append(rules, getNoSNATLoadBalancerIPRules(svcPort, localEndpoints)...) + } + } + } + return rules +} + +// getUDNNFTRules generates nftables rules for a UDN service. +// If netConfig is nil, the resulting map elements will have empty values, +// suitable only for entry removal. +func getUDNNFTRules(service *kapi.Service, netConfig *bridgeUDNConfiguration) []*knftables.Element { + rules := make([]*knftables.Element, 0) + for _, svcPort := range service.Spec.Ports { + if util.ServiceTypeHasNodePort(service) { + rules = append(rules, getUDNNodePortMarkNFTRule(svcPort, netConfig)) + } + rules = append(rules, getUDNExternalIPsMarkNFTRules(svcPort, util.GetExternalAndLBIPs(service), netConfig)...) 
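The helpers above only manage set and map contents; the chain that consults the no-SNAT sets lives elsewhere in the node gateway setup. As a rough sketch of the consuming side (the chain name and rule text below are illustrative stand-ins, not the patch's actual management-port chain):

```go
import "sigs.k8s.io/knftables"

// addNoSNATLookupRule sketches how a SNAT chain can exempt traffic whose
// protocol and destination port appear in the no-SNAT set: when the set
// lookup matches, the "return" verdict skips any SNAT rules that follow
// in the same chain.
func addNoSNATLookupRule(tx *knftables.Transaction) {
	tx.Add(&knftables.Chain{Name: "mgmtport-snat-sketch"}) // hypothetical chain name
	tx.Add(&knftables.Rule{
		Chain: "mgmtport-snat-sketch",
		Rule: knftables.Concat(
			"meta l4proto . th dport",
			"@", "mgmtport-no-snat-nodeports",
			"return",
		),
	})
}
```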
diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go
index 4d0e478377..289e1ae523 100644
--- a/go-controller/pkg/node/gateway_shared_intf.go
+++ b/go-controller/pkg/node/gateway_shared_intf.go
@@ -1,6 +1,7 @@
 package node
 
 import (
+	"context"
 	"fmt"
 	"hash/fnv"
 	"math"
@@ -9,17 +10,21 @@ import (
 	"strings"
 	"sync"
 
+	"github.com/vishvananda/netlink"
+	"golang.org/x/sys/unix"
+
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory"
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube"
+	nad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller"
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/controllers/egressservice"
 	nodeipt "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/iptables"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/linkmanager"
+	nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables"
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager"
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util"
 	utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors"
-	"github.com/vishvananda/netlink"
-	"golang.org/x/sys/unix"
 
 	kapi "k8s.io/api/core/v1"
 	discovery "k8s.io/api/discovery/v1"
@@ -28,6 +33,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/klog/v2"
 	utilnet "k8s.io/utils/net"
+	"sigs.k8s.io/knftables"
 )
 
 const (
@@ -54,15 +60,133 @@ const (
 	// ovnKubeNodeSNATMark is used to mark packets that need to be SNAT-ed to nodeIP for
 	// traffic originating from egressIP and egressService controlled pods towards other nodes in the cluster.
 	ovnKubeNodeSNATMark = "0x3f0"
+
+	// nftablesUDNServicePreroutingChain is a base chain registered into the prerouting hook,
+	// and it contains one rule that jumps to nftablesUDNServiceMarkChain.
+	// Traffic from the default network's management interface is bypassed
+	// to prevent giving the default network access to the local node's UDN NodePorts.
+	nftablesUDNServicePreroutingChain = "udn-service-prerouting"
+
+	// nftablesUDNServiceOutputChain is a base chain registered into the output hook;
+	// it contains one rule that jumps to nftablesUDNServiceMarkChain.
+	nftablesUDNServiceOutputChain = "udn-service-output"
+
+	// nftablesUDNServiceMarkChain is a regular chain trying to match the incoming traffic
+	// against the following UDN service verdict maps: nftablesUDNMarkNodePortsMap,
+	// nftablesUDNMarkExternalIPsV4Map, nftablesUDNMarkExternalIPsV6Map
+	nftablesUDNServiceMarkChain = "udn-service-mark"
+
+	// nftablesUDNMarkNodePortsMap is a verdict map containing
+	// protocol / port keys (matched against traffic addressed to a local node IP) indicating traffic that
+	// should be marked with a UDN specific value, which is used to direct the traffic
+	// to the appropriate network.
+	nftablesUDNMarkNodePortsMap = "udn-mark-nodeports"
+
+	// nftablesUDNMarkExternalIPsV4Map and nftablesUDNMarkExternalIPsV6Map are verdict
+	// maps containing externalIP (or LB ingress IP) / protocol / port keys indicating traffic that
+	// should be marked with a UDN specific value, which is used to direct the traffic
+	// to the appropriate network.
+ nftablesUDNMarkExternalIPsV4Map = "udn-mark-external-ips-v4" + nftablesUDNMarkExternalIPsV6Map = "udn-mark-external-ips-v6" ) +// configureUDNServicesNFTables configures the nftables chains, rules, and verdict maps +// that are used to set packet marks on externally exposed UDN services +func configureUDNServicesNFTables() error { + nft, err := nodenft.GetNFTablesHelper() + if err != nil { + return err + } + tx := nft.NewTransaction() + + tx.Add(&knftables.Chain{ + Name: nftablesUDNServiceMarkChain, + Comment: knftables.PtrTo("UDN services packet mark"), + }) + tx.Flush(&knftables.Chain{Name: nftablesUDNServiceMarkChain}) + + tx.Add(&knftables.Chain{ + Name: nftablesUDNServicePreroutingChain, + Comment: knftables.PtrTo("UDN services packet mark - Prerouting"), + + Type: knftables.PtrTo(knftables.FilterType), + Hook: knftables.PtrTo(knftables.PreroutingHook), + Priority: knftables.PtrTo(knftables.ManglePriority), + }) + tx.Flush(&knftables.Chain{Name: nftablesUDNServicePreroutingChain}) + + tx.Add(&knftables.Rule{ + Chain: nftablesUDNServicePreroutingChain, + Rule: knftables.Concat( + "iifname", "!=", fmt.Sprintf("%q", types.K8sMgmtIntfName), + "jump", nftablesUDNServiceMarkChain, + ), + }) + + tx.Add(&knftables.Chain{ + Name: nftablesUDNServiceOutputChain, + Comment: knftables.PtrTo("UDN services packet mark - Output"), + + Type: knftables.PtrTo(knftables.FilterType), + Hook: knftables.PtrTo(knftables.OutputHook), + Priority: knftables.PtrTo(knftables.ManglePriority), + }) + tx.Flush(&knftables.Chain{Name: nftablesUDNServiceOutputChain}) + tx.Add(&knftables.Rule{ + Chain: nftablesUDNServiceOutputChain, + Rule: knftables.Concat( + "jump", nftablesUDNServiceMarkChain, + ), + }) + + tx.Add(&knftables.Map{ + Name: nftablesUDNMarkNodePortsMap, + Comment: knftables.PtrTo("UDN services NodePorts mark"), + Type: "inet_proto . inet_service : verdict", + }) + tx.Add(&knftables.Map{ + Name: nftablesUDNMarkExternalIPsV4Map, + Comment: knftables.PtrTo("UDN services External IPs mark (IPv4)"), + Type: "ipv4_addr . inet_proto . inet_service : verdict", + }) + tx.Add(&knftables.Map{ + Name: nftablesUDNMarkExternalIPsV6Map, + Comment: knftables.PtrTo("UDN services External IPs mark (IPv6)"), + Type: "ipv6_addr . inet_proto . inet_service : verdict", + }) + + tx.Add(&knftables.Rule{ + Chain: nftablesUDNServiceMarkChain, + Rule: knftables.Concat( + "fib daddr type local meta l4proto . th dport vmap", "@", nftablesUDNMarkNodePortsMap, + ), + }) + tx.Add(&knftables.Rule{ + Chain: nftablesUDNServiceMarkChain, + Rule: knftables.Concat( + "ip daddr . meta l4proto . th dport vmap", "@", nftablesUDNMarkExternalIPsV4Map, + ), + }) + tx.Add(&knftables.Rule{ + Chain: nftablesUDNServiceMarkChain, + Rule: knftables.Concat( + "ip6 daddr . meta l4proto . th dport vmap", "@", nftablesUDNMarkExternalIPsV6Map, + ), + }) + + return nft.Run(context.TODO(), tx) +} + // nodePortWatcherIptables manages iptables rules for shared gateway // to ensure that services using NodePorts are accessible. 
type nodePortWatcherIptables struct { + nadController *nad.NetAttachDefinitionController } -func newNodePortWatcherIptables() *nodePortWatcherIptables { - return &nodePortWatcherIptables{} +func newNodePortWatcherIptables(nadController *nad.NetAttachDefinitionController) *nodePortWatcherIptables { + return &nodePortWatcherIptables{ + nadController: nadController, + } } // nodePortWatcher manages OpenFlow and iptables rules @@ -73,13 +197,13 @@ type nodePortWatcher struct { gatewayIPv6 string gatewayIPLock sync.Mutex ofportPhys string - ofportPatch string gwBridge string // Map of service name to programmed iptables/OF rules serviceInfo map[ktypes.NamespacedName]*serviceConfig serviceInfoLock sync.Mutex ofm *openflowManager nodeIPManager *addressManager + nadController *nad.NetAttachDefinitionController watchFactory factory.NodeWatchFactory } @@ -126,12 +250,23 @@ func (npw *nodePortWatcher) updateGatewayIPs(addressManager *addressManager) { // // `add` parameter indicates if the flows should exist or be removed from the cache // `hasLocalHostNetworkEp` indicates if at least one host networked endpoint exists for this service which is local to this node. -func (npw *nodePortWatcher) updateServiceFlowCache(service *kapi.Service, add, hasLocalHostNetworkEp bool) error { +func (npw *nodePortWatcher) updateServiceFlowCache(service *kapi.Service, netInfo util.NetInfo, add, hasLocalHostNetworkEp bool) error { if config.Gateway.Mode == config.GatewayModeLocal && config.Gateway.AllowNoUplink && npw.ofportPhys == "" { // if LGW mode and no uplink gateway bridge, ingress traffic enters host from node physical interface instead of the breth0. Skip adding these service flows to br-ex. return nil } + var netConfig *bridgeUDNConfiguration + var actions string + + if add { + netConfig = npw.ofm.getActiveNetwork(netInfo) + if netConfig == nil { + return fmt.Errorf("failed to get active network config for network %s", netInfo.GetNetworkName()) + } + actions = fmt.Sprintf("output:%s", netConfig.ofPortPatch) + } + // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure // that dl_src is included in match criteria! @@ -143,8 +278,6 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *kapi.Service, add, h isServiceTypeETPLocal := util.ServiceExternalTrafficPolicyLocal(service) - actions := fmt.Sprintf("output:%s", npw.ofportPatch) - // cookie is only used for debugging purpose. so it is not fatal error if cookie is failed to be generated. 
 	for _, svcPort := range service.Spec.Ports {
 		protocol := strings.ToLower(string(svcPort.Protocol))
@@ -157,18 +290,18 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *kapi.Service, add, h
 			flowProtocols = append(flowProtocols, protocol+"6")
 		}
 		for _, flowProtocol := range flowProtocols {
-			cookie, err = svcToCookie(service.Namespace, service.Name, flowProtocol, svcPort.NodePort)
-			if err != nil {
-				klog.Warningf("Unable to generate cookie for nodePort svc: %s, %s, %s, %d, error: %v",
-					service.Namespace, service.Name, flowProtocol, svcPort.Port, err)
-				cookie = "0"
-			}
 			key = strings.Join([]string{"NodePort", service.Namespace, service.Name, flowProtocol, fmt.Sprintf("%d", svcPort.NodePort)}, "_")
 			// Delete if needed and skip to next protocol
 			if !add {
 				npw.ofm.deleteFlowsByKey(key)
 				continue
 			}
+			cookie, err = svcToCookie(service.Namespace, service.Name, flowProtocol, svcPort.NodePort)
+			if err != nil {
+				klog.Warningf("Unable to generate cookie for nodePort svc: %s, %s, %s, %d, error: %v",
+					service.Namespace, service.Name, flowProtocol, svcPort.Port, err)
+				cookie = "0"
+			}
 			// This allows external traffic ingress when the svc's ExternalTrafficPolicy is
 			// set to Local, and the backend pod is HostNetworked. We need to add
 			// Flows that will DNAT all traffic coming into nodeport to the nodeIP:Port and
@@ -211,7 +344,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *kapi.Service, add, h
 					// table=0, matches on return traffic from service nodePort and sends it out to primary node interface (br-ex)
 					fmt.Sprintf("cookie=%s, priority=110, in_port=%s, dl_src=%s, %s, tp_src=%d, "+
 						"actions=output:%s",
-						cookie, npw.ofportPatch, npw.ofm.getDefaultBridgeMAC(), flowProtocol, svcPort.NodePort, npw.ofportPhys)})
+						cookie, netConfig.ofPortPatch, npw.ofm.getDefaultBridgeMAC(), flowProtocol, svcPort.NodePort, npw.ofportPhys)})
 			}
 		}
 	}
@@ -252,16 +385,45 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *kapi.Service, add, h
 					err)
 			}
 		}
-		if err = npw.createLbAndExternalSvcFlows(service, &svcPort, add, hasLocalHostNetworkEp, protocol, actions,
+		if err = npw.createLbAndExternalSvcFlows(service, netConfig, &svcPort, add, hasLocalHostNetworkEp, protocol, actions,
 			ingParsedIPs, "Ingress", ofPorts); err != nil {
 			errors = append(errors, err)
 		}
-		if err = npw.createLbAndExternalSvcFlows(service, &svcPort, add, hasLocalHostNetworkEp, protocol, actions,
+		if err = npw.createLbAndExternalSvcFlows(service, netConfig, &svcPort, add, hasLocalHostNetworkEp, protocol, actions,
 			extParsedIPs, "External", ofPorts); err != nil {
 			errors = append(errors, err)
 		}
 	}
+
+	// Add flows for default network services that are accessible from UDN networks
+	if util.IsNetworkSegmentationSupportEnabled() {
+		// The flow added below has a higher priority than the per UDN service flow:
+		// priority=200, table=2, ip, ip_src=169.254.0., actions=set_field:->eth_dst,output:
+		// This ordering ensures that traffic to UDN allowed default services goes to the default patch port.
+
+		if util.IsUDNEnabledService(ktypes.NamespacedName{Namespace: service.Namespace, Name: service.Name}.String()) {
+			key = strings.Join([]string{"UDNAllowedSVC", service.Namespace, service.Name}, "_")
+			if !add {
+				npw.ofm.deleteFlowsByKey(key)
+				return utilerrors.Join(errors...)
+ } + + ipPrefix := "ip" + masqueradeSubnet := config.Gateway.V4MasqueradeSubnet + if !utilnet.IsIPv4String(service.Spec.ClusterIP) { + ipPrefix = "ipv6" + masqueradeSubnet = config.Gateway.V6MasqueradeSubnet + } + // table 2, user-defined network host -> OVN towards default cluster network services + defaultNetConfig := npw.ofm.defaultBridge.getActiveNetworkBridgeConfig(types.DefaultNetworkName) + + npw.ofm.updateFlowCacheEntry(key, []string{fmt.Sprintf("cookie=%s, priority=300, table=2, %s, %s_src=%s, %s_dst=%s, "+ + "actions=set_field:%s->eth_dst,output:%s", + defaultOpenFlowCookie, ipPrefix, ipPrefix, masqueradeSubnet, ipPrefix, service.Spec.ClusterIP, + npw.ofm.getDefaultBridgeMAC().String(), defaultNetConfig.ofPortPatch)}) + } + } return utilerrors.Join(errors...) } @@ -284,7 +446,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *kapi.Service, add, h // `actions`: "send to patchport" // `externalIPOrLBIngressIP` is either externalIP.IP or LB.status.ingress.IP // `ipType` is either "External" or "Ingress" -func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *kapi.Service, svcPort *kapi.ServicePort, add bool, +func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *kapi.Service, netConfig *bridgeUDNConfiguration, svcPort *kapi.ServicePort, add bool, hasLocalHostNetworkEp bool, protocol string, actions string, externalIPOrLBIngressIPs []string, ipType string, ofPorts []string) error { for _, externalIPOrLBIngressIP := range externalIPOrLBIngressIPs { @@ -315,7 +477,7 @@ func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *kapi.Service, s continue } // add the ARP bypass flow regardless of service type or gateway modes since its applicable in all scenarios. - arpFlow := npw.generateARPBypassFlow(ofPorts, externalIPOrLBIngressIP, cookie) + arpFlow := npw.generateARPBypassFlow(ofPorts, netConfig.ofPortPatch, externalIPOrLBIngressIP, cookie) externalIPFlows = append(externalIPFlows, arpFlow) // This allows external traffic ingress when the svc's ExternalTrafficPolicy is // set to Local, and the backend pod is HostNetworked. We need to add @@ -352,7 +514,7 @@ func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *kapi.Service, s etpSvcOpenFlowCookie, npw.ofportPhys)) } else if config.Gateway.Mode == config.GatewayModeShared { // add the ICMP Fragmentation flow for shared gateway mode. 
- icmpFlow := npw.generateICMPFragmentationFlow(nwDst, externalIPOrLBIngressIP, cookie) + icmpFlow := npw.generateICMPFragmentationFlow(nwDst, externalIPOrLBIngressIP, netConfig.ofPortPatch, cookie) externalIPFlows = append(externalIPFlows, icmpFlow) // case2 (see function description for details) externalIPFlows = append(externalIPFlows, @@ -363,7 +525,7 @@ func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *kapi.Service, s // table=0, matches on return traffic from service externalIP or LB ingress and sends it out to primary node interface (br-ex) fmt.Sprintf("cookie=%s, priority=110, in_port=%s, dl_src=%s, %s, %s=%s, tp_src=%d, "+ "actions=output:%s", - cookie, npw.ofportPatch, npw.ofm.getDefaultBridgeMAC(), flowProtocol, nwSrc, externalIPOrLBIngressIP, svcPort.Port, npw.ofportPhys)) + cookie, netConfig.ofPortPatch, npw.ofm.getDefaultBridgeMAC(), flowProtocol, nwSrc, externalIPOrLBIngressIP, svcPort.Port, npw.ofportPhys)) } npw.ofm.updateFlowCacheEntry(key, externalIPFlows) } @@ -373,7 +535,7 @@ func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *kapi.Service, s // generate ARP/NS bypass flow which will send the ARP/NS request everywhere *but* to OVN // OpenFlow will not do hairpin switching, so we can safely add the origin port to the list of ports, too -func (npw *nodePortWatcher) generateARPBypassFlow(ofPorts []string, ipAddr string, cookie string) string { +func (npw *nodePortWatcher) generateARPBypassFlow(ofPorts []string, ofPortPatch, ipAddr string, cookie string) string { addrResDst := "arp_tpa" addrResProto := "arp, arp_op=1" if utilnet.IsIPv6String(ipAddr) { @@ -396,7 +558,7 @@ func (npw *nodePortWatcher) generateARPBypassFlow(ofPorts []string, ipAddr strin // Filtering ofPortPhys is for consistency / readability only, OpenFlow will not send // out the in_port normally (see man 7 ovs-actions) for _, port := range ofPorts { - if port == npw.ofportPatch || port == npw.ofportPhys { + if port == ofPortPatch || port == npw.ofportPhys { continue } arpPortsFiltered = append(arpPortsFiltered, port) @@ -409,7 +571,7 @@ func (npw *nodePortWatcher) generateARPBypassFlow(ofPorts []string, ipAddr strin return arpFlow } -func (npw *nodePortWatcher) generateICMPFragmentationFlow(nwDst, ipAddr string, cookie string) string { +func (npw *nodePortWatcher) generateICMPFragmentationFlow(nwDst, ipAddr string, ofPortPatch, cookie string) string { // we send any ICMP destination unreachable, fragmentation needed to the OVN pipeline too so that // path MTU discovery continues to work. 
icmpMatch := "icmp" @@ -422,7 +584,7 @@ func (npw *nodePortWatcher) generateICMPFragmentationFlow(nwDst, ipAddr string, } icmpFragmentationFlow := fmt.Sprintf("cookie=%s, priority=110, in_port=%s, %s, %s=%s, icmp_type=%d, "+ "icmp_code=%d, actions=output:%s", - cookie, npw.ofportPhys, icmpMatch, nwDst, ipAddr, icmpType, icmpCode, npw.ofportPatch) + cookie, npw.ofportPhys, icmpMatch, nwDst, ipAddr, icmpType, icmpCode, ofPortPatch) return icmpFragmentationFlow } @@ -500,28 +662,45 @@ func (npw *nodePortWatcher) updateServiceInfo(index ktypes.NamespacedName, servi // addServiceRules ensures the correct iptables rules and OpenFlow physical // flows are programmed for a given service and endpoint configuration -func addServiceRules(service *kapi.Service, localEndpoints []string, svcHasLocalHostNetEndPnt bool, npw *nodePortWatcher) error { +func addServiceRules(service *kapi.Service, netInfo util.NetInfo, localEndpoints []string, svcHasLocalHostNetEndPnt bool, npw *nodePortWatcher) error { // For dpu or Full mode var err error var errors []error + var activeNetwork *bridgeUDNConfiguration if npw != nil { - if err = npw.updateServiceFlowCache(service, true, svcHasLocalHostNetEndPnt); err != nil { + if err = npw.updateServiceFlowCache(service, netInfo, true, svcHasLocalHostNetEndPnt); err != nil { errors = append(errors, err) } npw.ofm.requestFlowSync() - if !npw.dpuMode { - // add iptable rules only in full mode - if err = insertIptRules(getGatewayIPTRules(service, localEndpoints, svcHasLocalHostNetEndPnt)); err != nil { - errors = append(errors, fmt.Errorf("failed to add iptables rules for service: %v", err)) + activeNetwork = npw.ofm.getActiveNetwork(netInfo) + if activeNetwork == nil { + return fmt.Errorf("failed to get active network config for network %s", netInfo.GetNetworkName()) + } + } + + if npw == nil || !npw.dpuMode { + // add iptables/nftables rules only in full mode + iptRules := getGatewayIPTRules(service, localEndpoints, svcHasLocalHostNetEndPnt) + if len(iptRules) > 0 { + if err := insertIptRules(iptRules); err != nil { + err = fmt.Errorf("failed to add iptables rules for service %s/%s: %v", + service.Namespace, service.Name, err) + errors = append(errors, err) } } - } else { - // For Host Only Mode - if err = insertIptRules(getGatewayIPTRules(service, localEndpoints, svcHasLocalHostNetEndPnt)); err != nil { - errors = append(errors, fmt.Errorf("failed to add iptables rules for service: %v", err)) + nftElems := getGatewayNFTRules(service, localEndpoints, svcHasLocalHostNetEndPnt) + if netInfo.IsPrimaryNetwork() && activeNetwork != nil { + nftElems = append(nftElems, getUDNNFTRules(service, activeNetwork)...) + } + if len(nftElems) > 0 { + if err := nodenft.UpdateNFTElements(nftElems); err != nil { + err = fmt.Errorf("failed to update nftables rules for service %s/%s: %v", + service.Namespace, service.Name, err) + errors = append(errors, err) + } } - } + return utilerrors.Join(errors...) } @@ -532,53 +711,86 @@ func delServiceRules(service *kapi.Service, localEndpoints []string, npw *nodePo var errors []error // full mode || dpu mode if npw != nil { - if err = npw.updateServiceFlowCache(service, false, false); err != nil { + if err = npw.updateServiceFlowCache(service, nil, false, false); err != nil { errors = append(errors, fmt.Errorf("error updating service flow cache: %v", err)) } npw.ofm.requestFlowSync() - if !npw.dpuMode { - // Always try and delete all rules here in full mode & in host only mode. We don't touch iptables in dpu mode. 
- // +--------------------------+-----------------------+-----------------------+--------------------------------+ - // | svcHasLocalHostNetEndPnt | ExternalTrafficPolicy | InternalTrafficPolicy | Scenario for deletion | - // |--------------------------|-----------------------|-----------------------|--------------------------------| - // | | | | deletes the MARK | - // | false | cluster | local | rules for itp=local | - // | | | | called from mangle | - // |--------------------------|-----------------------|-----------------------|--------------------------------| - // | | | | deletes the REDIRECT | - // | true | cluster | local | rules towards target | - // | | | | port for itp=local | - // |--------------------------|-----------------------|-----------------------|--------------------------------| - // | | | | deletes the DNAT rules for | - // | false | local | cluster | non-local-host-net | - // | | | | eps towards masqueradeIP + | - // | | | | DNAT rules towards clusterIP | - // |--------------------------|-----------------------|-----------------------|--------------------------------| - // | | | | deletes the DNAT rules | - // | false||true | cluster | cluster | towards clusterIP | - // | | | | for the default case | - // |--------------------------|-----------------------|-----------------------|--------------------------------| - // | | | | deletes all the rules | - // | false||true | local | local | for etp=local + itp=local | - // | | | | + default dnat towards CIP | - // +--------------------------+-----------------------+-----------------------+--------------------------------+ - - if err = nodeipt.DelRules(getGatewayIPTRules(service, localEndpoints, true)); err != nil { - errors = append(errors, fmt.Errorf("error updating service flow cache: %v", err)) - } - if err = nodeipt.DelRules(getGatewayIPTRules(service, localEndpoints, false)); err != nil { - errors = append(errors, fmt.Errorf("error updating service flow cache: %v", err)) + } + + if npw == nil || !npw.dpuMode { + // Always try and delete all rules here in full mode & in host only mode. We don't touch iptables in dpu mode. 
+ // +--------------------------+-----------------------+-----------------------+--------------------------------+ + // | svcHasLocalHostNetEndPnt | ExternalTrafficPolicy | InternalTrafficPolicy | Scenario for deletion | + // |--------------------------|-----------------------|-----------------------|--------------------------------| + // | | | | deletes the MARK | + // | false | cluster | local | rules for itp=local | + // | | | | called from mangle | + // |--------------------------|-----------------------|-----------------------|--------------------------------| + // | | | | deletes the REDIRECT | + // | true | cluster | local | rules towards target | + // | | | | port for itp=local | + // |--------------------------|-----------------------|-----------------------|--------------------------------| + // | | | | deletes the DNAT rules for | + // | false | local | cluster | non-local-host-net | + // | | | | eps towards masqueradeIP + | + // | | | | DNAT rules towards clusterIP | + // |--------------------------|-----------------------|-----------------------|--------------------------------| + // | | | | deletes the DNAT rules | + // | false||true | cluster | cluster | towards clusterIP | + // | | | | for the default case | + // |--------------------------|-----------------------|-----------------------|--------------------------------| + // | | | | deletes all the rules | + // | false||true | local | local | for etp=local + itp=local | + // | | | | + default dnat towards CIP | + // +--------------------------+-----------------------+-----------------------+--------------------------------+ + + iptRules := getGatewayIPTRules(service, localEndpoints, true) + iptRules = append(iptRules, getGatewayIPTRules(service, localEndpoints, false)...) + if len(iptRules) > 0 { + if err := nodeipt.DelRules(iptRules); err != nil { + err := fmt.Errorf("failed to delete iptables rules for service %s/%s: %v", + service.Namespace, service.Name, err) + errors = append(errors, err) } } - } else { - - if err = nodeipt.DelRules(getGatewayIPTRules(service, localEndpoints, true)); err != nil { - errors = append(errors, fmt.Errorf("error updating service flow cache: %v", err)) + nftElems := getGatewayNFTRules(service, localEndpoints, true) + nftElems = append(nftElems, getGatewayNFTRules(service, localEndpoints, false)...) + if len(nftElems) > 0 { + if err := nodenft.DeleteNFTElements(nftElems); err != nil { + err = fmt.Errorf("failed to delete nftables rules for service %s/%s: %v", + service.Namespace, service.Name, err) + errors = append(errors, err) + } } - if err = nodeipt.DelRules(getGatewayIPTRules(service, localEndpoints, false)); err != nil { - errors = append(errors, fmt.Errorf("error updating service flow cache: %v", err)) + + if util.IsNetworkSegmentationSupportEnabled() { + // NOTE: The code below is not using nodenft.DeleteNFTElements because it first adds elements + // before removing them, which fails for UDN NFT rules. These rules only have map keys, + // not key-value pairs, making it impossible to add. + // Attempt to delete the elements directly and handle the IsNotFound error. + // + // TODO: Switch to `nft destroy` when supported. + nftElems = getUDNNFTRules(service, nil) + if len(nftElems) > 0 { + nft, err := nodenft.GetNFTablesHelper() + if err != nil { + return utilerrors.Join(append(errors, err)...) 
+ } + + tx := nft.NewTransaction() + for _, elem := range nftElems { + tx.Delete(elem) + } + + if err := nft.Run(context.TODO(), tx); err != nil && !knftables.IsNotFound(err) { + err = fmt.Errorf("failed to delete nftables rules for UDN service %s/%s: %v", + service.Namespace, service.Name, err) + errors = append(errors, err) + } + } } } + return utilerrors.Join(errors...) } @@ -605,8 +817,14 @@ func (npw *nodePortWatcher) AddService(service *kapi.Service) error { } klog.V(5).Infof("Adding service %s in namespace %s", service.Name, service.Namespace) + + netInfo, err := npw.nadController.GetActiveNetworkForNamespace(service.Namespace) + if err != nil { + return fmt.Errorf("error getting active network for service %s in namespace %s: %w", service.Name, service.Namespace, err) + } + name := ktypes.NamespacedName{Namespace: service.Namespace, Name: service.Name} - epSlices, err := npw.watchFactory.GetServiceEndpointSlices(service.Namespace, service.Name, types.DefaultNetworkName) + epSlices, err := npw.watchFactory.GetServiceEndpointSlices(service.Namespace, service.Name, netInfo.GetNetworkName()) if err != nil { if !kerrors.IsNotFound(err) { return fmt.Errorf("error retrieving all endpointslices for service %s/%s during service add: %w", @@ -625,13 +843,14 @@ func (npw *nodePortWatcher) AddService(service *kapi.Service) error { if exists := npw.addOrSetServiceInfo(name, service, hasLocalHostNetworkEp, localEndpoints); !exists { klog.V(5).Infof("Service Add %s event in namespace %s came before endpoint event setting svcConfig", service.Name, service.Namespace) - if err := addServiceRules(service, sets.List(localEndpoints), hasLocalHostNetworkEp, npw); err != nil { - return fmt.Errorf("AddService failed for nodePortWatcher: %v", err) + if err := addServiceRules(service, netInfo, sets.List(localEndpoints), hasLocalHostNetworkEp, npw); err != nil { + npw.getAndDeleteServiceInfo(name) + return fmt.Errorf("AddService failed for nodePortWatcher: %w, trying delete: %w", err, delServiceRules(service, sets.List(localEndpoints), npw)) } } else { // Need to update flows here in case an attribute of the gateway has changed, such as MAC address klog.V(5).Infof("Updating already programmed rules for %s in namespace %s", service.Name, service.Namespace) - if err = npw.updateServiceFlowCache(service, true, hasLocalHostNetworkEp); err != nil { + if err = npw.updateServiceFlowCache(service, netInfo, true, hasLocalHostNetworkEp); err != nil { return fmt.Errorf("failed to update flows for service %s/%s: %w", service.Namespace, service.Name, err) } npw.ofm.requestFlowSync() @@ -662,6 +881,7 @@ func (npw *nodePortWatcher) UpdateService(old, new *kapi.Service) error { // Delete old rules if needed, but don't delete svcConfig // so that we don't miss any endpoint update events here klog.V(5).Infof("Deleting old service rules for: %v", old) + if err = delServiceRules(old, sets.List(svcConfig.localEndpoints), npw); err != nil { errors = append(errors, err) } @@ -669,7 +889,13 @@ func (npw *nodePortWatcher) UpdateService(old, new *kapi.Service) error { if util.ServiceTypeHasClusterIP(new) && util.IsClusterIPSet(new) { klog.V(5).Infof("Adding new service rules for: %v", new) - if err = addServiceRules(new, sets.List(svcConfig.localEndpoints), svcConfig.hasLocalHostNetworkEp, npw); err != nil { + + netInfo, err := npw.nadController.GetActiveNetworkForNamespace(new.Namespace) + if err != nil { + return fmt.Errorf("error getting active network for service %s in namespace %s: %w", new.Name, new.Namespace, err) + } + + if 
err = addServiceRules(new, netInfo, sets.List(svcConfig.localEndpoints), svcConfig.hasLocalHostNetworkEp, npw); err != nil { errors = append(errors, err) } } @@ -755,7 +981,8 @@ func (npw *nodePortWatcher) DeleteService(service *kapi.Service) error { func (npw *nodePortWatcher) SyncServices(services []interface{}) error { var err error var errors []error - keepIPTRules := []nodeipt.Rule{} + var keepIPTRules []nodeipt.Rule + var keepNFTSetElems, keepNFTMapElems []*knftables.Element for _, serviceInterface := range services { name := ktypes.NamespacedName{Namespace: serviceInterface.(*kapi.Service).Namespace, Name: serviceInterface.(*kapi.Service).Name} @@ -770,7 +997,13 @@ func (npw *nodePortWatcher) SyncServices(services []interface{}) error { continue } - epSlices, err := npw.watchFactory.GetServiceEndpointSlices(service.Namespace, service.Name, types.DefaultNetworkName) + netInfo, err := npw.nadController.GetActiveNetworkForNamespace(service.Namespace) + if err != nil { + errors = append(errors, err) + continue + } + + epSlices, err := npw.watchFactory.GetServiceEndpointSlices(service.Namespace, service.Name, netInfo.GetNetworkName()) if err != nil { if !kerrors.IsNotFound(err) { return fmt.Errorf("error retrieving all endpointslices for service %s/%s during SyncServices: %w", @@ -785,24 +1018,33 @@ func (npw *nodePortWatcher) SyncServices(services []interface{}) error { npw.getAndSetServiceInfo(name, service, hasLocalHostNetworkEp, localEndpoints) // Delete OF rules for service if they exist - if err = npw.updateServiceFlowCache(service, false, hasLocalHostNetworkEp); err != nil { + if err = npw.updateServiceFlowCache(service, netInfo, false, hasLocalHostNetworkEp); err != nil { errors = append(errors, err) } - if err = npw.updateServiceFlowCache(service, true, hasLocalHostNetworkEp); err != nil { + if err = npw.updateServiceFlowCache(service, netInfo, true, hasLocalHostNetworkEp); err != nil { errors = append(errors, err) } - // Add correct iptables rules only for Full mode + // Add correct netfilter rules only for Full mode if !npw.dpuMode { - keepIPTRules = append(keepIPTRules, getGatewayIPTRules(service, sets.List(localEndpoints), hasLocalHostNetworkEp)...) + localEndpointsArray := sets.List(localEndpoints) + keepIPTRules = append(keepIPTRules, getGatewayIPTRules(service, localEndpointsArray, hasLocalHostNetworkEp)...) + keepNFTSetElems = append(keepNFTSetElems, getGatewayNFTRules(service, localEndpointsArray, hasLocalHostNetworkEp)...) + if util.IsNetworkSegmentationSupportEnabled() && netInfo.IsPrimaryNetwork() { + netConfig := npw.ofm.getActiveNetwork(netInfo) + if netConfig == nil { + return fmt.Errorf("failed to get active network config for network %s", netInfo.GetNetworkName()) + } + keepNFTMapElems = append(keepNFTMapElems, getUDNNFTRules(service, netConfig)...) 
+ } } } // sync OF rules once npw.ofm.requestFlowSync() - // sync IPtables rules once only for Full mode + // sync netfilter rules once only for Full mode if !npw.dpuMode { // (NOTE: Order is important, add jump to iptableETPChain before jump to NP/EIP chains) - for _, chain := range []string{iptableITPChain, egressservice.Chain, iptableNodePortChain, iptableExternalIPChain, iptableETPChain, iptableMgmPortChain} { + for _, chain := range []string{iptableITPChain, egressservice.Chain, iptableNodePortChain, iptableExternalIPChain, iptableETPChain} { if err = recreateIPTRules("nat", chain, keepIPTRules); err != nil { errors = append(errors, err) } @@ -810,6 +1052,19 @@ func (npw *nodePortWatcher) SyncServices(services []interface{}) error { if err = recreateIPTRules("mangle", iptableITPChain, keepIPTRules); err != nil { errors = append(errors, err) } + + for _, set := range []string{nftablesMgmtPortNoSNATNodePorts, nftablesMgmtPortNoSNATServicesV4, nftablesMgmtPortNoSNATServicesV6} { + if err = recreateNFTSet(set, keepNFTSetElems); err != nil { + errors = append(errors, err) + } + } + if util.IsNetworkSegmentationSupportEnabled() { + for _, nftMap := range []string{nftablesUDNMarkNodePortsMap, nftablesUDNMarkExternalIPsV4Map, nftablesUDNMarkExternalIPsV6Map} { + if err = recreateNFTMap(nftMap, keepNFTMapElems); err != nil { + errors = append(errors, err) + } + } + } } return utilerrors.Join(errors...) } @@ -819,12 +1074,25 @@ func (npw *nodePortWatcher) AddEndpointSlice(epSlice *discovery.EndpointSlice) e var errors []error var svc *kapi.Service - svcName := epSlice.Labels[discovery.LabelServiceName] - svc, err = npw.watchFactory.GetService(epSlice.Namespace, svcName) + netInfo, err := npw.nadController.GetActiveNetworkForNamespace(epSlice.Namespace) + if err != nil { + return fmt.Errorf("error getting active network for endpointslice %s in namespace %s: %w", epSlice.Name, epSlice.Namespace, err) + } + + if util.IsNetworkSegmentationSupportEnabled() && !util.IsEndpointSliceForNetwork(epSlice, netInfo) { + return nil + } + + svcNamespacedName, err := util.ServiceFromEndpointSlice(epSlice, netInfo) + if err != nil || svcNamespacedName == nil { + return err + } + + svc, err = npw.watchFactory.GetService(svcNamespacedName.Namespace, svcNamespacedName.Name) if err != nil { if !kerrors.IsNotFound(err) { return fmt.Errorf("error retrieving service %s/%s during endpointslice add: %w", - epSlice.Namespace, svcName, err) + svcNamespacedName.Namespace, svcNamespacedName.Name, err) } // This is not necessarily an error. For e.g when there are endpoints // without a corresponding service. 
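The UDN verdict-map elements kept by SyncServices above carry a jump verdict as their value. At the nft level, one NodePort entry renders roughly as in the sketch below; the mark-chain name is a hypothetical stand-in for whatever GetUDNMarkChain returns for the network's pktMark:

```go
package main

import (
	"context"
	"fmt"

	"sigs.k8s.io/knftables"
)

func main() {
	nft := knftables.NewFake(knftables.InetFamily, "ovn-kubernetes")
	tx := nft.NewTransaction()
	tx.Add(&knftables.Table{})
	// Chain that the verdict jumps to; stands in for GetUDNMarkChain(pktMark).
	tx.Add(&knftables.Chain{Name: "udn-mark-2001"})
	tx.Add(&knftables.Map{
		Name: "udn-mark-nodeports",
		Type: "inet_proto . inet_service : verdict",
	})
	// Same shape as getUDNNodePortMarkNFTRule produces for a TCP NodePort.
	tx.Add(&knftables.Element{
		Map:   "udn-mark-nodeports",
		Key:   []string{"tcp", "30950"},
		Value: []string{"jump udn-mark-2001"},
	})
	if err := nft.Run(context.TODO(), tx); err != nil {
		panic(err)
	}
	// Prints lines such as:
	//   add element inet ovn-kubernetes udn-mark-nodeports { tcp . 30950 : jump udn-mark-2001 }
	fmt.Println(nft.Dump())
}
```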
@@ -839,7 +1107,7 @@ func (npw *nodePortWatcher) AddEndpointSlice(epSlice *discovery.EndpointSlice) e klog.V(5).Infof("Adding endpointslice %s in namespace %s", epSlice.Name, epSlice.Namespace) nodeIPs := npw.nodeIPManager.ListAddresses() - epSlices, err := npw.watchFactory.GetServiceEndpointSlices(svc.Namespace, svc.Name, types.DefaultNetworkName) + epSlices, err := npw.watchFactory.GetServiceEndpointSlices(svc.Namespace, svc.Name, netInfo.GetNetworkName()) if err != nil { // No need to continue adding the new endpoint slice, if we can't retrieve all slices for this service return fmt.Errorf("error retrieving endpointslices for service %s/%s during endpointslice add: %w", svc.Namespace, svc.Name, err) @@ -850,14 +1118,10 @@ func (npw *nodePortWatcher) AddEndpointSlice(epSlice *discovery.EndpointSlice) e // Here we make sure the correct rules are programmed whenever an AddEndpointSlice event is // received, only alter flows if we need to, i.e if cache wasn't set or if it was and // hasLocalHostNetworkEp or localEndpoints state (for LB svc where NPs=0) changed, to prevent flow churn - namespacedName, err := util.ServiceNamespacedNameFromEndpointSlice(epSlice) - if err != nil { - return fmt.Errorf("cannot add %s/%s to nodePortWatcher: %v", epSlice.Namespace, epSlice.Name, err) - } - out, exists := npw.getAndSetServiceInfo(namespacedName, svc, hasLocalHostNetworkEp, localEndpoints) + out, exists := npw.getAndSetServiceInfo(*svcNamespacedName, svc, hasLocalHostNetworkEp, localEndpoints) if !exists { klog.V(5).Infof("Endpointslice %s ADD event in namespace %s is creating rules", epSlice.Name, epSlice.Namespace) - return addServiceRules(svc, sets.List(localEndpoints), hasLocalHostNetworkEp, npw) + return addServiceRules(svc, netInfo, sets.List(localEndpoints), hasLocalHostNetworkEp, npw) } if out.hasLocalHostNetworkEp != hasLocalHostNetworkEp || @@ -866,7 +1130,7 @@ func (npw *nodePortWatcher) AddEndpointSlice(epSlice *discovery.EndpointSlice) e if err = delServiceRules(svc, sets.List(out.localEndpoints), npw); err != nil { errors = append(errors, err) } - if err = addServiceRules(svc, sets.List(localEndpoints), hasLocalHostNetworkEp, npw); err != nil { + if err = addServiceRules(svc, netInfo, sets.List(localEndpoints), hasLocalHostNetworkEp, npw); err != nil { errors = append(errors, err) } return utilerrors.Join(errors...) 
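AddEndpointSlice above only reprograms rules when the cached per-service state actually changed; that guard is what prevents flow churn on redundant slice events. Reduced to its core, with types simplified from the patch's serviceConfig:

```go
import "k8s.io/apimachinery/pkg/util/sets"

// needsReprogram reports whether service rules must be torn down and re-added:
// only when the host-network-endpoint bit or the set of local eligible
// endpoints differs from what was last programmed.
func needsReprogram(cachedHasLocalHostNetEp bool, cachedEndpoints sets.Set[string],
	hasLocalHostNetEp bool, endpoints sets.Set[string]) bool {
	return cachedHasLocalHostNetEp != hasLocalHostNetEp ||
		!cachedEndpoints.Equal(endpoints)
}
```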
@@ -880,13 +1144,21 @@ func (npw *nodePortWatcher) DeleteEndpointSlice(epSlice *discovery.EndpointSlice var errors []error var hasLocalHostNetworkEp = false + netInfo, err := npw.nadController.GetActiveNetworkForNamespace(epSlice.Namespace) + if err != nil { + return fmt.Errorf("error getting active network for endpointslice %s in namespace %s: %w", epSlice.Name, epSlice.Namespace, err) + } + if util.IsNetworkSegmentationSupportEnabled() && !util.IsEndpointSliceForNetwork(epSlice, netInfo) { + return nil + } + klog.V(5).Infof("Deleting endpointslice %s in namespace %s", epSlice.Name, epSlice.Namespace) // remove rules for endpoints and add back normal ones - namespacedName, err := util.ServiceNamespacedNameFromEndpointSlice(epSlice) - if err != nil { - return fmt.Errorf("cannot delete %s/%s from nodePortWatcher: %v", epSlice.Namespace, epSlice.Name, err) + namespacedName, err := util.ServiceFromEndpointSlice(epSlice, netInfo) + if err != nil || namespacedName == nil { + return err } - epSlices, err := npw.watchFactory.GetServiceEndpointSlices(epSlice.Namespace, epSlice.Labels[discovery.LabelServiceName], types.DefaultNetworkName) + epSlices, err := npw.watchFactory.GetServiceEndpointSlices(namespacedName.Namespace, namespacedName.Name, netInfo.GetNetworkName()) if err != nil { if !kerrors.IsNotFound(err) { return fmt.Errorf("error retrieving all endpointslices for service %s/%s during endpointslice delete on %s: %w", @@ -904,7 +1176,12 @@ func (npw *nodePortWatcher) DeleteEndpointSlice(epSlice *discovery.EndpointSlice namespacedName.Namespace, namespacedName.Name, epSlice.Name, err) } localEndpoints := npw.GetLocalEligibleEndpointAddresses(epSlices, svc) - if svcConfig, exists := npw.updateServiceInfo(namespacedName, nil, &hasLocalHostNetworkEp, localEndpoints); exists { + if svcConfig, exists := npw.updateServiceInfo(*namespacedName, nil, &hasLocalHostNetworkEp, localEndpoints); exists { + netInfo, err := npw.nadController.GetActiveNetworkForNamespace(namespacedName.Namespace) + if err != nil { + return fmt.Errorf("error getting active network for service %s in namespace %s: %w", svc.Name, svc.Namespace, err) + } + // Lock the cache mutex here so we don't miss a service delete during an endpoint delete // we have to do this because deleting and adding iptables rules is slow. npw.serviceInfoLock.Lock() @@ -913,7 +1190,7 @@ func (npw *nodePortWatcher) DeleteEndpointSlice(epSlice *discovery.EndpointSlice if err = delServiceRules(svcConfig.service, sets.List(svcConfig.localEndpoints), npw); err != nil { errors = append(errors, err) } - if err = addServiceRules(svcConfig.service, sets.List(localEndpoints), hasLocalHostNetworkEp, npw); err != nil { + if err = addServiceRules(svcConfig.service, netInfo, sets.List(localEndpoints), hasLocalHostNetworkEp, npw); err != nil { errors = append(errors, err) } return utilerrors.Join(errors...) 
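When tearing down a UDN service, getUDNNFTRules is called with a nil netConfig, so the elements carry map keys but no verdict values; such elements can be deleted but not re-added, which is why delServiceRules issues a direct delete and swallows IsNotFound instead of going through nodenft.DeleteNFTElements. The same pattern in isolation:

```go
import (
	"context"

	"sigs.k8s.io/knftables"
)

// deleteIgnoringNotFound deletes set/map elements and treats "element does not
// exist" as success, so repeated teardown of the same service stays idempotent.
func deleteIgnoringNotFound(nft knftables.Interface, elems []*knftables.Element) error {
	tx := nft.NewTransaction()
	for _, elem := range elems {
		tx.Delete(elem)
	}
	if err := nft.Run(context.TODO(), tx); err != nil && !knftables.IsNotFound(err) {
		return err
	}
	return nil
}
```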
@@ -933,9 +1210,18 @@ func (npw *nodePortWatcher) UpdateEndpointSlice(oldEpSlice, newEpSlice *discover var err error var errors []error - namespacedName, err := util.ServiceNamespacedNameFromEndpointSlice(newEpSlice) + netInfo, err := npw.nadController.GetActiveNetworkForNamespace(newEpSlice.Namespace) if err != nil { - return fmt.Errorf("cannot update %s/%s in nodePortWatcher: %v", newEpSlice.Namespace, newEpSlice.Name, err) + return fmt.Errorf("error getting active network for endpointslice %s in namespace %s: %w", newEpSlice.Name, newEpSlice.Namespace, err) + } + + if util.IsNetworkSegmentationSupportEnabled() && !util.IsEndpointSliceForNetwork(newEpSlice, netInfo) { + return nil + } + + namespacedName, err := util.ServiceFromEndpointSlice(newEpSlice, netInfo) + if err != nil || namespacedName == nil { + return err } svc, err := npw.watchFactory.GetService(namespacedName.Namespace, namespacedName.Name) if err != nil && !kerrors.IsNotFound(err) { @@ -953,7 +1239,7 @@ func (npw *nodePortWatcher) UpdateEndpointSlice(oldEpSlice, newEpSlice *discover var serviceInfo *serviceConfig var exists bool - if serviceInfo, exists = npw.getServiceInfo(namespacedName); !exists { + if serviceInfo, exists = npw.getServiceInfo(*namespacedName); !exists { // When a service is updated from externalName to nodeport type, it won't be // in nodePortWatcher cache (npw): in this case, have the new nodeport IPtable rules // installed. @@ -970,7 +1256,7 @@ func (npw *nodePortWatcher) UpdateEndpointSlice(oldEpSlice, newEpSlice *discover // Update rules and service cache if hasHostNetworkEndpoints status changed or localEndpoints changed nodeIPs := npw.nodeIPManager.ListAddresses() - epSlices, err := npw.watchFactory.GetServiceEndpointSlices(newEpSlice.Namespace, newEpSlice.Labels[discovery.LabelServiceName], types.DefaultNetworkName) + epSlices, err := npw.watchFactory.GetServiceEndpointSlices(newEpSlice.Namespace, namespacedName.Name, netInfo.GetNetworkName()) if err != nil { if !kerrors.IsNotFound(err) { return fmt.Errorf("error retrieving all endpointslices for service %s/%s during endpointslice update on %s: %w", @@ -1011,7 +1297,13 @@ func (npwipt *nodePortWatcherIptables) AddService(service *kapi.Service) error { if !util.ServiceTypeHasClusterIP(service) || !util.IsClusterIPSet(service) { return nil } - if err := addServiceRules(service, nil, false, nil); err != nil { + + netInfo, err := npwipt.nadController.GetActiveNetworkForNamespace(service.Namespace) + if err != nil { + return fmt.Errorf("error getting active network for service %s in namespace %s: %w", service.Name, service.Namespace, err) + } + + if err := addServiceRules(service, netInfo, nil, false, nil); err != nil { return fmt.Errorf("AddService failed for nodePortWatcherIptables: %v", err) } return nil @@ -1034,7 +1326,12 @@ func (npwipt *nodePortWatcherIptables) UpdateService(old, new *kapi.Service) err } if util.ServiceTypeHasClusterIP(new) && util.IsClusterIPSet(new) { - if err = addServiceRules(new, nil, false, nil); err != nil { + netInfo, err := npwipt.nadController.GetActiveNetworkForNamespace(new.Namespace) + if err != nil { + return fmt.Errorf("error getting active network for service %s in namespace %s: %w", new.Name, new.Namespace, err) + } + + if err = addServiceRules(new, netInfo, nil, false, nil); err != nil { errors = append(errors, err) } } @@ -1050,6 +1347,7 @@ func (npwipt *nodePortWatcherIptables) DeleteService(service *kapi.Service) erro if !util.ServiceTypeHasClusterIP(service) || !util.IsClusterIPSet(service) { return nil } + 
if err := delServiceRules(service, nil, nil); err != nil { return fmt.Errorf("DeleteService failed for nodePortWatcherIptables: %v", err) } @@ -1060,6 +1358,7 @@ func (npwipt *nodePortWatcherIptables) SyncServices(services []interface{}) erro var err error var errors []error keepIPTRules := []nodeipt.Rule{} + keepNFTElems := []*knftables.Element{} for _, serviceInterface := range services { service, ok := serviceInterface.(*kapi.Service) if !ok { @@ -1074,14 +1373,22 @@ func (npwipt *nodePortWatcherIptables) SyncServices(services []interface{}) erro // Add correct iptables rules. // TODO: ETP and ITP is not implemented for smart NIC mode. keepIPTRules = append(keepIPTRules, getGatewayIPTRules(service, nil, false)...) + keepNFTElems = append(keepNFTElems, getGatewayNFTRules(service, nil, false)...) } - // sync IPtables rules once + // sync rules once for _, chain := range []string{iptableNodePortChain, iptableExternalIPChain} { if err = recreateIPTRules("nat", chain, keepIPTRules); err != nil { errors = append(errors, err) } } + + for _, set := range []string{nftablesMgmtPortNoSNATNodePorts, nftablesMgmtPortNoSNATServicesV4, nftablesMgmtPortNoSNATServicesV6} { + if err = recreateNFTSet(set, keepNFTElems); err != nil { + errors = append(errors, err) + } + } + return utilerrors.Join(errors...) } @@ -1222,32 +1529,50 @@ func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]st defaultOpenFlowCookie, ofPortHost, config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP.String(), config.Default.OVNMasqConntrackZone)) } - var protoPrefix string - var masqIP string + var protoPrefix, masqIP, masqSubnet string // table 0, packets coming from Host -> Service for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { if utilnet.IsIPv4CIDR(svcCIDR) { protoPrefix = "ip" masqIP = config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String() + masqSubnet = config.Gateway.V4MasqueradeSubnet } else { protoPrefix = "ipv6" masqIP = config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String() + masqSubnet = config.Gateway.V6MasqueradeSubnet } - // table 0, Host -> OVN towards SVC, SNAT to special IP + // table 0, Host (default network) -> OVN towards SVC, SNAT to special IP. dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_dst=%s,"+ + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_dst=%s, "+ "actions=ct(commit,zone=%d,nat(src=%s),table=2)", - defaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone, masqIP)) + defaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, + svcCIDR, config.Default.HostMasqConntrackZone, masqIP)) + + if util.IsNetworkSegmentationSupportEnabled() { + // table 0, Host (UDNs) -> OVN towards SVC, SNAT to special IP. + // For packets originating from UDN, commit without NATing, those + // have already been SNATed to the masq IP of the UDN. 
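+			// Illustrative rendering (values assumed for the example, not
+			// taken from this patch): with V4MasqueradeSubnet 169.254.0.0/17
+			// and a service CIDR of 10.96.0.0/16 this produces roughly:
+			//   priority=550, in_port=LOCAL, ip, ip_src=169.254.0.0/17,
+			//   ip_dst=10.96.0.0/16, actions=ct(commit,zone=64001,table=2)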
+ dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, "+ + "actions=ct(commit,zone=%d,table=2)", + defaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, + masqSubnet, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) + } + masqDst := masqIP + if util.IsNetworkSegmentationSupportEnabled() { + // In UDN match on the whole masquerade subnet to handle replies from UDN enabled services + masqDst = masqSubnet + } for _, netConfig := range bridge.patchedNetConfigs() { // table 0, Reply hairpin traffic to host, coming from OVN, unSNAT dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_src=%s, %s_dst=%s,"+ "actions=ct(zone=%d,nat,table=3)", defaultOpenFlowCookie, netConfig.ofPortPatch, protoPrefix, protoPrefix, svcCIDR, - protoPrefix, masqIP, config.Default.HostMasqConntrackZone)) + protoPrefix, masqDst, config.Default.HostMasqConntrackZone)) // table 0, Reply traffic coming from OVN to outside, drop it if the DNAT wasn't done either // at the GR load balancer or switch load balancer. It means the correct port wasn't provided. // nodeCIDR->serviceCIDR traffic flow is internal and it shouldn't be carried to outside the cluster @@ -1329,11 +1654,53 @@ func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]st fmt.Sprintf("cookie=%s, priority=10, table=1, dl_dst=%s, actions=output:%s", defaultOpenFlowCookie, bridgeMacAddress, ofPortHost)) } + defaultNetConfig := bridge.netConfig[types.DefaultNetworkName] + // table 2, dispatch from Host -> OVN dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=2, "+ - "actions=set_field:%s->eth_dst,output:%s", defaultOpenFlowCookie, bridgeMacAddress, defaultNetConfig.ofPortPatch)) + fmt.Sprintf("cookie=%s, priority=100, table=2, "+ + "actions=set_field:%s->eth_dst,output:%s", defaultOpenFlowCookie, + bridgeMacAddress, defaultNetConfig.ofPortPatch)) + + // table 2, priority 200, dispatch from UDN -> Host -> OVN. These packets have + // already been SNATed to the UDN's masq IP or have been marked with the UDN's packet mark. 
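+	// As a concrete illustration (values reused from the ip-rule examples
+	// later in this patch, not new behavior): network-id 1 is matched either
+	// by ip_src=169.254.0.12, its management-port masquerade IP, or by
+	// pkt_mark=0x1001 (pktMarkBase 0x1000 + 1), and is dispatched to that
+	// network's patch port instead of the default network's.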
+	if config.IPv4Mode {
+		for _, netConfig := range bridge.patchedNetConfigs() {
+			if netConfig.isDefaultNetwork() {
+				continue
+			}
+			dftFlows = append(dftFlows,
+				fmt.Sprintf("cookie=%s, priority=200, table=2, ip, ip_src=%s, "+
+					"actions=set_field:%s->eth_dst,output:%s",
+					defaultOpenFlowCookie, netConfig.v4MasqIPs.ManagementPort.IP,
+					bridgeMacAddress, netConfig.ofPortPatch))
+			dftFlows = append(dftFlows,
+				fmt.Sprintf("cookie=%s, priority=200, table=2, ip, pkt_mark=%s, "+
+					"actions=set_field:%s->eth_dst,output:%s",
+					defaultOpenFlowCookie, netConfig.pktMark,
+					bridgeMacAddress, netConfig.ofPortPatch))
+		}
+	}
+
+	if config.IPv6Mode {
+		for _, netConfig := range bridge.patchedNetConfigs() {
+			if netConfig.isDefaultNetwork() {
+				continue
+			}
+
+			dftFlows = append(dftFlows,
+				fmt.Sprintf("cookie=%s, priority=200, table=2, ip6, ipv6_src=%s, "+
+					"actions=set_field:%s->eth_dst,output:%s",
+					defaultOpenFlowCookie, netConfig.v6MasqIPs.ManagementPort.IP,
+					bridgeMacAddress, netConfig.ofPortPatch))
+			dftFlows = append(dftFlows,
+				fmt.Sprintf("cookie=%s, priority=200, table=2, ip6, pkt_mark=%s, "+
+					"actions=set_field:%s->eth_dst,output:%s",
+					defaultOpenFlowCookie, netConfig.pktMark,
+					bridgeMacAddress, netConfig.ofPortPatch))
+		}
+	}

 	// table 3, dispatch from OVN -> Host
 	dftFlows = append(dftFlows,
@@ -1421,9 +1788,24 @@ func commonFlows(subnets []*net.IPNet, bridge *bridgeConfiguration) ([]string, e
 				defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, ovnKubeNodeSNATMark,
 				config.Default.ConntrackZone, physicalIP.IP, netConfig.masqCTMark, ofPortPhys))

+			// table 0, packets coming from egressIP pods, only for user defined networks. If an egressIP is assigned to
+			// this node, then all networks get a flow even if no pods on that network were selected by this egressIP.
+			if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect &&
+				config.Gateway.Mode != config.GatewayModeDisabled && bridge.eipMarkIPs != nil {
+				if netConfig.masqCTMark != ctMarkOVN {
+					for mark, eip := range bridge.eipMarkIPs.GetIPv4() {
+						dftFlows = append(dftFlows,
+							fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%d, "+
+								"actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s",
+								defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, mark,
+								config.Default.ConntrackZone, eip, netConfig.masqCTMark, ofPortPhys))
+					}
+				}
+			}
+
 			// table 0, packets coming from pods headed externally. Commit connections with ct_mark ctMarkOVN
 			// so that reverse direction goes back to the pods. 
-			if netConfig.masqCTMark == ctMarkOVN {
+			if netConfig.isDefaultNetwork() {
 				dftFlows = append(dftFlows,
 					fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ip, "+
 						"actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s",
@@ -1434,7 +1816,7 @@ func commonFlows(subnets []*net.IPNet, bridge *bridgeConfiguration) ([]string, e
 				dftFlows = append(dftFlows,
 					fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ip, ip_src=%s, "+
 						"actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s",
-						defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, netConfig.v4MasqIP.IP, config.Default.ConntrackZone,
+						defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, netConfig.v4MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone,
 						physicalIP.IP, netConfig.masqCTMark, ofPortPhys))
 			}
 		}
@@ -1479,6 +1861,7 @@ func commonFlows(subnets []*net.IPNet, bridge *bridgeConfiguration) ([]string, e
 					"actions=ct(zone=%d, nat, table=1)", defaultOpenFlowCookie, ofPortPhys, config.Default.ConntrackZone))
 		}
 	}
+
 	if config.IPv6Mode {
 		physicalIP, err := util.MatchFirstIPNetFamily(true, bridgeIPs)
 		if err != nil {
@@ -1497,9 +1880,24 @@ func commonFlows(subnets []*net.IPNet, bridge *bridgeConfiguration) ([]string, e
 				defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, ovnKubeNodeSNATMark,
 				config.Default.ConntrackZone, physicalIP.IP, netConfig.masqCTMark, ofPortPhys))

+			// table 0, packets coming from egressIP pods, only for user defined networks. If an egressIP is assigned to
+			// this node, then all networks get a flow even if no pods on that network were selected by this egressIP.
+			if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect &&
+				config.Gateway.Mode != config.GatewayModeDisabled && bridge.eipMarkIPs != nil {
+				if netConfig.masqCTMark != ctMarkOVN {
+					for mark, eip := range bridge.eipMarkIPs.GetIPv6() {
+						dftFlows = append(dftFlows,
+							fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%d, "+
+								"actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s",
+								defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, mark,
+								config.Default.ConntrackZone, eip, netConfig.masqCTMark, ofPortPhys))
+					}
+				}
+			}
+
 			// table 0, packets coming from pods headed externally. Commit connections with ct_mark ctMarkOVN
 			// so that reverse direction goes back to the pods.
-			if netConfig.masqCTMark == ctMarkOVN {
+			if netConfig.isDefaultNetwork() {
 				dftFlows = append(dftFlows,
 					fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ipv6, "+
 						"actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s",
@@ -1509,7 +1907,7 @@ func commonFlows(subnets []*net.IPNet, bridge *bridgeConfiguration) ([]string, e
 				dftFlows = append(dftFlows,
 					fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ipv6, ipv6_src=%s, "+
 						"actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s",
-						defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, netConfig.v6MasqIP.IP, config.Default.ConntrackZone,
+						defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, netConfig.v6MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone,
 						physicalIP.IP, netConfig.masqCTMark, ofPortPhys))
 			}
 		}
@@ -1678,7 +2076,7 @@ func setBridgeOfPorts(bridge *bridgeConfiguration) error {
 		bridge.ofPortPhys = ofportPhys
 	}

-	// Get ofport represeting the host. That is, host representor port in case of DPUs, ovsLocalPort otherwise.
+	// Get ofport representing the host. 
That is, host representor port in case of DPUs, ovsLocalPort otherwise. if config.OvnKubeNode.Mode == types.NodeModeDPU { var stderr string hostRep, err := util.GetDPUHostInterface(bridge.bridgeName) @@ -1754,12 +2152,19 @@ func initSvcViaMgmPortRoutingRules(hostSubnets []*net.IPNet) error { return nil } -func newSharedGateway(nodeName string, subnets []*net.IPNet, gwNextHops []net.IP, gwIntf, egressGWIntf string, - gwIPs []*net.IPNet, nodeAnnotator kube.Annotator, kube kube.Interface, cfg *managementPortConfig, - watchFactory factory.NodeWatchFactory, routeManager *routemanager.Controller) (*gateway, error) { - klog.Info("Creating new shared gateway") +func newGateway(nodeName string, subnets []*net.IPNet, gwNextHops []net.IP, gwIntf, egressGWIntf string, + gwIPs []*net.IPNet, nodeAnnotator kube.Annotator, cfg *managementPortConfig, kube kube.Interface, + watchFactory factory.NodeWatchFactory, routeManager *routemanager.Controller, linkManager *linkmanager.Controller, + nadController *nad.NetAttachDefinitionController, gatewayMode config.GatewayMode) (*gateway, error) { + klog.Info("Creating new gateway") gw := &gateway{} + if gatewayMode == config.GatewayModeLocal { + if err := initLocalGateway(subnets, cfg); err != nil { + return nil, fmt.Errorf("failed to initialize new local gateway, err: %w", err) + } + } + gwBridge, exGwBridge, err := gatewayInitInternal( nodeName, gwIntf, egressGWIntf, gwNextHops, gwIPs, nodeAnnotator) if err != nil { @@ -1812,7 +2217,7 @@ func newSharedGateway(nodeName string, subnets []*net.IPNet, gwNextHops []net.IP gw.initFunc = func() error { // Program cluster.GatewayIntf to let non-pod traffic to go to host // stack - klog.Info("Creating Shared Gateway Openflow Manager") + klog.Info("Creating Gateway Openflow Manager") err := setBridgeOfPorts(gwBridge) if err != nil { return err @@ -1823,6 +2228,10 @@ func newSharedGateway(nodeName string, subnets []*net.IPNet, gwNextHops []net.IP return err } } + if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && config.Gateway.Mode != config.GatewayModeDisabled { + gw.bridgeEIPAddrManager = newBridgeEIPAddrManager(nodeName, gwBridge.bridgeName, linkManager, kube, watchFactory.EgressIPInformer(), watchFactory.NodeCoreInformer()) + gwBridge.eipMarkIPs = gw.bridgeEIPAddrManager.GetCache() + } gw.nodeIPManager = newAddressManager(nodeName, kube, cfg, watchFactory, gwBridge) nodeIPs := gw.nodeIPManager.ListAddresses() @@ -1879,8 +2288,8 @@ func newSharedGateway(nodeName string, subnets []*net.IPNet, gwNextHops []net.IP return err } } - klog.Info("Creating Shared Gateway Node Port Watcher") - gw.nodePortWatcher, err = newNodePortWatcher(gwBridge, gw.openflowManager, gw.nodeIPManager, watchFactory) + klog.Info("Creating Gateway Node Port Watcher") + gw.nodePortWatcher, err = newNodePortWatcher(gwBridge, gw.openflowManager, gw.nodeIPManager, watchFactory, nadController) if err != nil { return err } @@ -1896,21 +2305,13 @@ func newSharedGateway(nodeName string, subnets []*net.IPNet, gwNextHops []net.IP return nil } gw.watchFactory = watchFactory.(*factory.WatchFactory) - klog.Info("Shared Gateway Creation Complete") + klog.Info("Gateway Creation Complete") return gw, nil } func newNodePortWatcher(gwBridge *bridgeConfiguration, ofm *openflowManager, - nodeIPManager *addressManager, watchFactory factory.NodeWatchFactory) (*nodePortWatcher, error) { - // TODO(dceara): support services for UDNs - defaultNetConfig := gwBridge.netConfig[types.DefaultNetworkName] - // Get ofport of patchPort 
- ofportPatch, stderr, err := util.GetOVSOfPort("--if-exists", "get", - "interface", defaultNetConfig.patchPort, "ofport") - if err != nil { - return nil, fmt.Errorf("failed to get ofport of %s, stderr: %q, error: %v", - defaultNetConfig.patchPort, stderr, err) - } + nodeIPManager *addressManager, watchFactory factory.NodeWatchFactory, + nadController *nad.NetAttachDefinitionController) (*nodePortWatcher, error) { // Get ofport of physical interface ofportPhys, stderr, err := util.GetOVSOfPort("--if-exists", "get", @@ -1936,6 +2337,11 @@ func newNodePortWatcher(gwBridge *bridgeConfiguration, ofm *openflowManager, return nil, err } } + if util.IsNetworkSegmentationSupportEnabled() { + if err := configureUDNServicesNFTables(); err != nil { + return nil, fmt.Errorf("unable to configure UDN nftables: %w", err) + } + } } var subnets []*net.IPNet @@ -1967,12 +2373,12 @@ func newNodePortWatcher(gwBridge *bridgeConfiguration, ofm *openflowManager, gatewayIPv4: gatewayIPv4, gatewayIPv6: gatewayIPv6, ofportPhys: ofportPhys, - ofportPatch: ofportPatch, gwBridge: gwBridge.bridgeName, serviceInfo: make(map[ktypes.NamespacedName]*serviceConfig), nodeIPManager: nodeIPManager, ofm: ofm, watchFactory: watchFactory, + nadController: nadController, } return npw, nil } diff --git a/go-controller/pkg/node/gateway_udn.go b/go-controller/pkg/node/gateway_udn.go index aca77bbfb9..6e7fb72a7b 100644 --- a/go-controller/pkg/node/gateway_udn.go +++ b/go-controller/pkg/node/gateway_udn.go @@ -1,30 +1,37 @@ package node import ( + "context" "fmt" "net" - "strings" "time" v1 "k8s.io/api/core/v1" listers "k8s.io/client-go/listers/core/v1" "k8s.io/klog/v2" utilnet "k8s.io/utils/net" + "k8s.io/utils/ptr" + "sigs.k8s.io/knftables" + + "github.com/vishvananda/netlink" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/udn" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/iprulemanager" + nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/vrfmanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" - "github.com/vishvananda/netlink" ) const ( // ctMarkUDNBase is the conntrack mark base value for user defined networks to use // Each network gets its own mark == base + network-id ctMarkUDNBase = 3 + // pktMarkBase is the base value for packet mark assigned to user defined networks + // Each network has a packet mark equal to base + network-id + pktMarkBase = 4096 // waitForPatchPortTimeout is the maximum time we wait for a UDN's patch // port to be created by OVN. 
waitForPatchPortTimeout = 30 * time.Second
@@ -52,10 +59,13 @@ type UserDefinedNetworkGateway struct {
 	// masqCTMark holds the mark value for this network
 	// which is used for egress traffic in shared gateway mode
 	masqCTMark uint
-	// v4MasqIP holds the IPv4 masquerade IP for this network
-	v4MasqIP *net.IPNet
-	// v6MasqIP holds the IPv6 masquerade IP for this network
-	v6MasqIP *net.IPNet
+	// pktMark holds the packet mark value for this network
+	// which is used for directing traffic towards the UDN
+	pktMark uint
+	// v4MasqIPs holds the IPv4 masquerade IPs for this network
+	v4MasqIPs *udn.MasqueradeIPs
+	// v6MasqIPs holds the IPv6 masquerade IPs for this network
+	v6MasqIPs *udn.MasqueradeIPs
 	// stores the pointer to default network's gateway so that
 	// we can leverage it from here to program UDN flows on breth0
 	// Currently we use the openflowmanager and nodeIPManager from
@@ -83,7 +93,7 @@ func (b *bridgeConfiguration) getBridgePortConfigurations() ([]bridgeUDNConfigur
 }

 // addNetworkBridgeConfig adds the patchport and ctMark value for the provided netInfo into the bridge configuration cache
-func (b *bridgeConfiguration) addNetworkBridgeConfig(nInfo util.NetInfo, masqCTMark uint, v4MasqIP, v6MasqIP *net.IPNet) {
+func (b *bridgeConfiguration) addNetworkBridgeConfig(nInfo util.NetInfo, masqCTMark, pktMark uint, v6MasqIPs, v4MasqIPs *udn.MasqueradeIPs) {
 	b.Lock()
 	defer b.Unlock()

@@ -95,8 +105,9 @@ func (b *bridgeConfiguration) addNetworkBridgeConfig(nInfo util.NetInfo, masqCTM
 	netConfig := &bridgeUDNConfiguration{
 		patchPort:  patchPort,
 		masqCTMark: fmt.Sprintf("0x%x", masqCTMark),
-		v4MasqIP:   v4MasqIP,
-		v6MasqIP:   v6MasqIP,
+		pktMark:    fmt.Sprintf("0x%x", pktMark),
+		v4MasqIPs:  v4MasqIPs,
+		v6MasqIPs:  v6MasqIPs,
 	}
 	b.netConfig[netName] = netConfig
@@ -114,6 +125,22 @@ func (b *bridgeConfiguration) delNetworkBridgeConfig(nInfo util.NetInfo) {
 	delete(b.netConfig, nInfo.GetNetworkName())
 }

+// getActiveNetworkBridgeConfig returns a copy of the network configuration corresponding to the
+// provided network name.
+//
+// NOTE: if the network configuration can't be found or if the network is not patched by OVN
+// yet, this returns nil.
+func (b *bridgeConfiguration) getActiveNetworkBridgeConfig(networkName string) *bridgeUDNConfiguration {
+	b.Lock()
+	defer b.Unlock()
+
+	if netConfig, found := b.netConfig[networkName]; found && netConfig.ofPortPatch != "" {
+		result := *netConfig
+		return &result
+	}
+	return nil
+}
+
 func (b *bridgeConfiguration) patchedNetConfigs() []*bridgeUDNConfiguration {
 	result := make([]*bridgeUDNConfiguration, 0, len(b.netConfig))
 	for _, netConfig := range b.netConfig {
@@ -133,8 +160,13 @@ type bridgeUDNConfiguration struct {
 	patchPort   string
 	ofPortPatch string
 	masqCTMark  string
-	v4MasqIP    *net.IPNet
-	v6MasqIP    *net.IPNet
+	pktMark     string
+	v4MasqIPs   *udn.MasqueradeIPs
+	v6MasqIPs   *udn.MasqueradeIPs
+}
+
+func (netConfig *bridgeUDNConfiguration) isDefaultNetwork() bool {
+	return netConfig.masqCTMark == ctMarkOVN
 }

 func (netConfig *bridgeUDNConfiguration) setBridgeNetworkOfPortsInternal() error {
@@ -163,28 +195,28 @@ func NewUserDefinedNetworkGateway(netInfo util.NetInfo, networkID int, node *v1.
 	defaultNetworkGateway Gateway) (*UserDefinedNetworkGateway, error) {
 	// Generate a per network conntrack mark and masquerade IPs to be used for egress traffic. 
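 	// For example: networkID 3 yields masqCTMark 6 (ctMarkUDNBase 3 + 3) and
 	// pktMark 4099 = 0x1003 (pktMarkBase 4096 + 3).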
var ( - v4MasqIP *net.IPNet - v6MasqIP *net.IPNet + v4MasqIPs *udn.MasqueradeIPs + v6MasqIPs *udn.MasqueradeIPs + err error ) masqCTMark := ctMarkUDNBase + uint(networkID) + pktMark := pktMarkBase + uint(networkID) if config.IPv4Mode { - v4MasqIPs, err := udn.AllocateV4MasqueradeIPs(networkID) + v4MasqIPs, err = udn.AllocateV4MasqueradeIPs(networkID) if err != nil { return nil, fmt.Errorf("failed to get v4 masquerade IP, network %s (%d): %v", netInfo.GetNetworkName(), networkID, err) } - v4MasqIP = v4MasqIPs.GatewayRouter } if config.IPv6Mode { - v6MasqIPs, err := udn.AllocateV6MasqueradeIPs(networkID) + v6MasqIPs, err = udn.AllocateV6MasqueradeIPs(networkID) if err != nil { return nil, fmt.Errorf("failed to get v6 masquerade IP, network %s (%d): %v", netInfo.GetNetworkName(), networkID, err) } - v6MasqIP = v6MasqIPs.GatewayRouter } gw, ok := defaultNetworkGateway.(*gateway) if !ok { - return nil, fmt.Errorf("unable to deference default node network controller gateway object") + return nil, fmt.Errorf("unable to dereference default node network controller gateway object") } return &UserDefinedNetworkGateway{ @@ -195,19 +227,69 @@ func NewUserDefinedNetworkGateway(netInfo util.NetInfo, networkID int, node *v1. kubeInterface: kubeInterface, vrfManager: vrfManager, masqCTMark: masqCTMark, - v4MasqIP: v4MasqIP, - v6MasqIP: v6MasqIP, + pktMark: pktMark, + v4MasqIPs: v4MasqIPs, + v6MasqIPs: v6MasqIPs, gateway: gw, ruleManager: ruleManager, }, nil } +// GetUDNMarkChain returns the UDN mark chain name +func GetUDNMarkChain(pktMark string) string { + return "udn-mark-" + pktMark +} + +// delMarkChain removes the UDN packet mark nftables chain +func (udng *UserDefinedNetworkGateway) delMarkChain() error { + nft, err := nodenft.GetNFTablesHelper() + if err != nil { + return err + } + tx := nft.NewTransaction() + chain := &knftables.Chain{ + Name: GetUDNMarkChain(fmt.Sprintf("0x%x", udng.pktMark)), + } + tx.Flush(chain) + tx.Delete(chain) + return nft.Run(context.TODO(), tx) +} + +// addMarkChain adds the UDN nftables chain containing a rule that marks packets +// with the network specific value +func (udng *UserDefinedNetworkGateway) addMarkChain() error { + counterIfDebug := "" + if config.Logging.Level > 4 { + counterIfDebug = "counter" + } + + nft, err := nodenft.GetNFTablesHelper() + if err != nil { + return err + } + tx := nft.NewTransaction() + chain := &knftables.Chain{ + Name: GetUDNMarkChain(fmt.Sprintf("0x%x", udng.pktMark)), + Comment: ptr.To(fmt.Sprintf("%s: UDN packet marking", udng.GetNetworkName())), + } + tx.Add(chain) + tx.Flush(chain) + + tx.Add(&knftables.Rule{ + Chain: chain.Name, + Rule: knftables.Concat("meta mark set", fmt.Sprintf("0x%x", udng.pktMark), counterIfDebug), + }) + + return nft.Run(context.TODO(), tx) +} + // AddNetwork will be responsible to create all plumbings // required by this UDN on the gateway side func (udng *UserDefinedNetworkGateway) AddNetwork() error { // port is created first and its MAC address configured. The IP(s) on that link are added after enslaving to a VRF device (addUDNManagementPortIPs) // because IPv6 addresses are removed by the kernel (if not link local) when enslaved to a VRF device. - mplink, macAddress, err := udng.addUDNManagementPort() + // Add the routes after setting the IP(s) to ensure that the default subnet route towards the mgmt network exists. 
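+	// The resulting ordering is: create the port with its MAC, enslave the
+	// link to the VRF, assign the IP(s), install the VRF routes, add the ip
+	// rules, set rp_filter to loose mode, program the bridge flows and,
+	// finally, add the packet mark chain.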
+ mplink, err := udng.addUDNManagementPort() if err != nil { return fmt.Errorf("could not create management port netdevice for network %s: %w", udng.GetNetworkName(), err) } @@ -217,14 +299,14 @@ func (udng *UserDefinedNetworkGateway) AddNetwork() error { if err != nil { return fmt.Errorf("failed to compute routes for network %s, err: %v", udng.GetNetworkName(), err) } - if err = udng.vrfManager.AddVRF(vrfDeviceName, mplink.Attrs().Name, uint32(vrfTableId), routes); err != nil { + if err = udng.vrfManager.AddVRF(vrfDeviceName, mplink.Attrs().Name, uint32(vrfTableId), nil); err != nil { return fmt.Errorf("could not add VRF %d for network %s, err: %v", vrfTableId, udng.GetNetworkName(), err) } if err = udng.addUDNManagementPortIPs(mplink); err != nil { return fmt.Errorf("unable to add management port IP(s) for link %s, for network %s: %w", mplink.Attrs().Name, udng.GetNetworkName(), err) } - if err := util.UpdateNodeManagementPortMACAddressesWithRetry(udng.node, udng.nodeLister, udng.kubeInterface, macAddress, udng.GetNetworkName()); err != nil { - return fmt.Errorf("unable to update mac address annotation for node %s, for network %s, err: %w", udng.node.Name, udng.GetNetworkName(), err) + if err = udng.vrfManager.AddVRFRoutes(vrfDeviceName, routes); err != nil { + return fmt.Errorf("could not add VRF %s routes for network %s, err: %v", vrfDeviceName, udng.GetNetworkName(), err) } // create the iprules for this network udnReplyIPRules, err := udng.constructUDNVRFIPRules(vrfTableId) @@ -236,17 +318,24 @@ func (udng *UserDefinedNetworkGateway) AddNetwork() error { return fmt.Errorf("unable to create iprule %v for network %s, err: %v", rule, udng.GetNetworkName(), err) } } + // add loose mode for rp filter on management port + mgmtPortName := util.GetNetworkScopedK8sMgmtHostIntfName(uint(udng.networkID)) + if err := addRPFilterLooseModeForManagementPort(mgmtPortName); err != nil { + return fmt.Errorf("could not set loose mode for reverse path filtering on management port %s: %v", mgmtPortName, err) + } if udng.openflowManager != nil { - udng.openflowManager.addNetwork(udng.NetInfo, udng.masqCTMark, udng.v4MasqIP, udng.v6MasqIP) + udng.openflowManager.addNetwork(udng.NetInfo, udng.masqCTMark, udng.pktMark, udng.v6MasqIPs, udng.v4MasqIPs) waiter := newStartupWaiterWithTimeout(waitForPatchPortTimeout) readyFunc := func() (bool, error) { if err := setBridgeNetworkOfPorts(udng.openflowManager.defaultBridge, udng.GetNetworkName()); err != nil { - return false, fmt.Errorf("failed to set network %s's openflow ports for default bridge; error: %v", udng.GetNetworkName(), err) + klog.V(3).Infof("Failed to set network %s's openflow ports for default bridge; error: %v", udng.GetNetworkName(), err) + return false, nil } if udng.openflowManager.externalGatewayBridge != nil { if err := setBridgeNetworkOfPorts(udng.openflowManager.externalGatewayBridge, udng.GetNetworkName()); err != nil { - return false, fmt.Errorf("failed to set network %s's openflow ports for secondary bridge; error: %v", udng.GetNetworkName(), err) + klog.V(3).Infof("Failed to set network %s's openflow ports for secondary bridge; error: %v", udng.GetNetworkName(), err) + return false, nil } } return true, nil @@ -265,6 +354,9 @@ func (udng *UserDefinedNetworkGateway) AddNetwork() error { klog.Warningf("Openflow manager has not been invoked for network %s; we will skip programming flows"+ "on the bridge for this network.", udng.NetInfo.GetNetworkName()) } + if err := udng.addMarkChain(); err != nil { + return fmt.Errorf("failed to add the 
packet mark chain: %w", err)
+	}
 	return nil
 }
@@ -291,63 +383,74 @@ func (udng *UserDefinedNetworkGateway) DelNetwork() error {
 			return fmt.Errorf("failed to reconcile default gateway for network %s, err: %v", udng.GetNetworkName(), err)
 		}
 	}
+	if err := udng.delMarkChain(); err != nil {
+		return err
+	}
 	// delete the management port interface for this network
 	return udng.deleteUDNManagementPort()
 }

 // addUDNManagementPort does the following:
 // STEP1: creates the (netdevice) OVS interface on br-int for the UDN's management port
-// STEP2: It saves the MAC address generated on the 1st go as an option on the OVS interface
-// so that it persists on reboots
-// STEP3: sets up the management port link on the host
-// Returns a netlink Link which is the UDN management port interface along with its MAC address
-func (udng *UserDefinedNetworkGateway) addUDNManagementPort() (netlink.Link, net.HardwareAddr, error) {
+// STEP2: sets up the management port link on the host
+// STEP3: enables IPv4 forwarding on the interface if the network has a v4 subnet
+// Returns a netlink Link which is the UDN management port interface
+func (udng *UserDefinedNetworkGateway) addUDNManagementPort() (netlink.Link, error) {
 	var err error
 	interfaceName := util.GetNetworkScopedK8sMgmtHostIntfName(uint(udng.networkID))
+	networkLocalSubnets, err := udng.getLocalSubnets()
+	if err != nil {
+		return nil, err
+	}
+	if len(networkLocalSubnets) == 0 {
+		return nil, fmt.Errorf("cannot determine subnets while configuring management port for network: %s", udng.GetNetworkName())
+	}
+	macAddr := util.IPAddrToHWAddr(util.GetNodeManagementIfAddr(networkLocalSubnets[0]).IP)
+
 	// STEP1
 	stdout, stderr, err := util.RunOVSVsctl(
 		"--", "--may-exist", "add-port", "br-int", interfaceName,
-		"--", "set", "interface", interfaceName,
+		"--", "set", "interface", interfaceName, fmt.Sprintf("mac=\"%s\"", macAddr.String()),
 		"type=internal", "mtu_request="+fmt.Sprintf("%d", udng.NetInfo.MTU()),
 		"external-ids:iface-id="+udng.GetNetworkScopedK8sMgmtIntfName(udng.node.Name),
 	)
 	if err != nil {
-		return nil, nil, fmt.Errorf("failed to add port to br-int for network %s, stdout: %q, stderr: %q, error: %w",
+		return nil, fmt.Errorf("failed to add port to br-int for network %s, stdout: %q, stderr: %q, error: %w",
 			udng.GetNetworkName(), stdout, stderr, err)
 	}
 	klog.V(3).Infof("Added OVS management port interface %s for network %s", interfaceName, udng.GetNetworkName())

 	// STEP2
-	macAddress, err := util.GetOVSPortMACAddress(interfaceName)
-	if err != nil {
-		return nil, nil, fmt.Errorf("failed to get management port MAC address for network %s: %v", udng.GetNetworkName(), err)
-	}
-	// persist the MAC address so that upon node reboot we get back the same mac address. 
- _, stderr, err = util.RunOVSVsctl("set", "interface", interfaceName, - fmt.Sprintf("mac=%s", strings.ReplaceAll(macAddress.String(), ":", "\\:"))) - if err != nil { - return nil, nil, fmt.Errorf("failed to persist MAC address %q for %q while plumbing network %s: stderr:%s (%v)", - macAddress.String(), interfaceName, udng.GetNetworkName(), stderr, err) - } - - // STEP3 mplink, err := util.LinkSetUp(interfaceName) if err != nil { - return nil, nil, fmt.Errorf("failed to set the link up for interface %s while plumbing network %s, err: %v", + return nil, fmt.Errorf("failed to set the link up for interface %s while plumbing network %s, err: %v", interfaceName, udng.GetNetworkName(), err) } klog.V(3).Infof("Setup management port link %s for network %s succeeded", interfaceName, udng.GetNetworkName()) - return mplink, macAddress, nil + + // STEP3 + // IPv6 forwarding is enabled globally + if ipv4, _ := udng.IPMode(); ipv4 { + stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net.ipv4.conf.%s.forwarding=1", interfaceName)) + if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.forwarding = 1", interfaceName) { + return nil, fmt.Errorf("could not set the correct forwarding value for interface %s: stdout: %v, stderr: %v, err: %v", + interfaceName, stdout, stderr, err) + } + } + return mplink, nil } -func (udng *UserDefinedNetworkGateway) addUDNManagementPortIPs(mpLink netlink.Link) error { - var err error +// getLocalSubnets returns pod subnets used by the current node. +// For L3 networks it parses the ovnNodeSubnets annotation, for L2 networks it returns the network subnets. +func (udng *UserDefinedNetworkGateway) getLocalSubnets() ([]*net.IPNet, error) { var networkLocalSubnets []*net.IPNet + var err error + // fetch subnets which we will use to get management port IP(s) if udng.TopologyType() == types.Layer3Topology { networkLocalSubnets, err = util.ParseNodeHostSubnetAnnotation(udng.node, udng.GetNetworkName()) if err != nil { - return fmt.Errorf("waiting for node %s to start, no annotation found on node for network %s: %w", + return nil, fmt.Errorf("waiting for node %s to start, no annotation found on node for network %s: %w", udng.node.Name, udng.GetNetworkName(), err) } } else if udng.TopologyType() == types.Layer2Topology { @@ -357,6 +460,15 @@ func (udng *UserDefinedNetworkGateway) addUDNManagementPortIPs(mpLink netlink.Li networkLocalSubnets = append(networkLocalSubnets, globalFlatL2Network.CIDR) } } + return networkLocalSubnets, nil +} + +func (udng *UserDefinedNetworkGateway) addUDNManagementPortIPs(mpLink netlink.Link) error { + networkLocalSubnets, err := udng.getLocalSubnets() + if err != nil { + return err + } + // extract management port IP from subnets and add it to link for _, subnet := range networkLocalSubnets { if config.IPv6Mode && utilnet.IsIPv6CIDR(subnet) || config.IPv4Mode && utilnet.IsIPv4CIDR(subnet) { @@ -390,11 +502,6 @@ func (udng *UserDefinedNetworkGateway) deleteUDNManagementPort() error { udng.GetNetworkName(), stdout, stderr, err) } klog.V(3).Infof("Removed OVS management port interface %s for network %s", interfaceName, udng.GetNetworkName()) - // sending nil mac address will delete the network's annotation value - if err := util.UpdateNodeManagementPortMACAddressesWithRetry(udng.node, udng.nodeLister, udng.kubeInterface, nil, udng.GetNetworkName()); err != nil { - return fmt.Errorf("unable to remove mac address annotation for node %s, for network %s, err: %v", udng.node.Name, udng.GetNetworkName(), err) - } - klog.V(3).Infof("Removed management 
port mac address information of %s for network %s", interfaceName, udng.GetNetworkName()) return nil } @@ -478,6 +585,46 @@ func (udng *UserDefinedNetworkGateway) computeRoutesForUDN(vrfTableId int, mpLin }) } + // Add routes for V[4|6]HostETPLocalMasqueradeIP: + // 169.254.0.3 via 100.100.1.1 dev ovn-k8s-mp1 + // For Layer3 networks add the cluster subnet route + // 100.100.0.0/16 via 100.100.1.1 dev ovn-k8s-mp1 + networkLocalSubnets, err := udng.getLocalSubnets() + if err != nil { + return nil, err + } + for _, localSubnet := range networkLocalSubnets { + gwIP := util.GetNodeGatewayIfAddr(localSubnet) + if gwIP == nil { + return nil, fmt.Errorf("unable to find gateway IP for network %s, subnet: %s", udng.GetNetworkName(), localSubnet) + } + etpLocalMasqueradeIP := config.Gateway.MasqueradeIPs.V4HostETPLocalMasqueradeIP + if utilnet.IsIPv6CIDR(localSubnet) { + etpLocalMasqueradeIP = config.Gateway.MasqueradeIPs.V6HostETPLocalMasqueradeIP + } + retVal = append(retVal, netlink.Route{ + LinkIndex: mpLink.Attrs().Index, + Dst: &net.IPNet{ + IP: etpLocalMasqueradeIP, + Mask: util.GetIPFullMask(etpLocalMasqueradeIP), + }, + Gw: gwIP.IP, + Table: vrfTableId, + }) + if udng.NetInfo.TopologyType() == types.Layer3Topology { + for _, clusterSubnet := range udng.Subnets() { + if clusterSubnet.CIDR.Contains(gwIP.IP) { + retVal = append(retVal, netlink.Route{ + LinkIndex: mpLink.Attrs().Index, + Dst: clusterSubnet.CIDR, + Gw: gwIP.IP, + Table: vrfTableId, + }) + } + } + } + } + return retVal, nil } @@ -505,30 +652,45 @@ func (udng *UserDefinedNetworkGateway) getV6MasqueradeIP() (*net.IPNet, error) { return util.GetIPNetFullMaskFromIP(masqIPs.ManagementPort.IP), nil } -// constructUDNVRFIPRules constructs rules that redirect packets towards the per-UDN masquerade IP +// constructUDNVRFIPRules constructs rules that redirect matching packets // into the corresponding UDN VRF routing table. 
// Example: +// 2000: from all fwmark 0x1001 lookup 1007 // 2000: from all to 169.254.0.12 lookup 1007 +// 2000: from all fwmark 0x1002 lookup 1009 // 2000: from all to 169.254.0.14 lookup 1009 func (udng *UserDefinedNetworkGateway) constructUDNVRFIPRules(vrfTableId int) ([]netlink.Rule, error) { - var masqIPRules []netlink.Rule + var ipRules []netlink.Rule masqIPv4, err := udng.getV4MasqueradeIP() if err != nil { return nil, err } if masqIPv4 != nil { - masqIPRules = append(masqIPRules, generateIPRuleForMasqIP(masqIPv4.IP, false, uint(vrfTableId))) + ipRules = append(ipRules, generateIPRuleForPacketMark(udng.pktMark, false, uint(vrfTableId))) + ipRules = append(ipRules, generateIPRuleForMasqIP(masqIPv4.IP, false, uint(vrfTableId))) } masqIPv6, err := udng.getV6MasqueradeIP() if err != nil { return nil, err } if masqIPv6 != nil { - masqIPRules = append(masqIPRules, generateIPRuleForMasqIP(masqIPv6.IP, true, uint(vrfTableId))) + ipRules = append(ipRules, generateIPRuleForPacketMark(udng.pktMark, true, uint(vrfTableId))) + ipRules = append(ipRules, generateIPRuleForMasqIP(masqIPv6.IP, true, uint(vrfTableId))) } - return masqIPRules, nil + return ipRules, nil } +func generateIPRuleForPacketMark(mark uint, isIPv6 bool, vrfTableId uint) netlink.Rule { + r := *netlink.NewRule() + r.Table = int(vrfTableId) + r.Priority = UDNMasqueradeIPRulePriority + r.Family = netlink.FAMILY_V4 + if isIPv6 { + r.Family = netlink.FAMILY_V6 + } + r.Mark = int(mark) + return r +} func generateIPRuleForMasqIP(masqIP net.IP, isIPv6 bool, vrfTableId uint) netlink.Rule { r := *netlink.NewRule() r.Table = int(vrfTableId) @@ -540,3 +702,18 @@ func generateIPRuleForMasqIP(masqIP net.IP, isIPv6 bool, vrfTableId uint) netlin r.Dst = util.GetIPNetFullMaskFromIP(masqIP) return r } + +func addRPFilterLooseModeForManagementPort(mgmtPortName string) error { + // update the reverse path filtering options for ovn-k8s-mpX interface to avoid dropping packets with masqueradeIP + // coming out of managementport interface + // NOTE: v6 doesn't have rp_filter strict mode block + rpFilterLooseMode := "2" + // TODO: Convert testing framework to mock golang module utilities. Example: + // result, err := sysctl.Sysctl(fmt.Sprintf("net/ipv4/conf/%s/rp_filter", types.K8sMgmtIntfName), rpFilterLooseMode) + stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net.ipv4.conf.%s.rp_filter=%s", mgmtPortName, rpFilterLooseMode)) + if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.rp_filter = %s", mgmtPortName, rpFilterLooseMode) { + return fmt.Errorf("could not set the correct rp_filter value for interface %s: stdout: %v, stderr: %v, err: %v", + mgmtPortName, stdout, stderr, err) + } + return nil +} diff --git a/go-controller/pkg/node/gateway_udn_test.go b/go-controller/pkg/node/gateway_udn_test.go index cdfa3351cd..6ccfa01409 100644 --- a/go-controller/pkg/node/gateway_udn_test.go +++ b/go-controller/pkg/node/gateway_udn_test.go @@ -3,6 +3,7 @@ package node import ( "fmt" "net" + "sort" "strings" "sync" "testing" @@ -10,7 +11,7 @@ import ( "github.com/containernetworking/plugins/pkg/ns" "github.com/containernetworking/plugins/pkg/testutils" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" . 
"github.com/onsi/gomega" "github.com/stretchr/testify/mock" @@ -18,36 +19,42 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" + utilnet "k8s.io/utils/net" "k8s.io/utils/ptr" + nadfake "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/fake" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + udnfakeclient "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/fake" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" factoryMocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory/mocks" kubemocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube/mocks" + networkAttachDefController "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/iprulemanager" + nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/vrfmanager" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" coreinformermocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/mocks/k8s.io/client-go/informers/core/v1" v1mocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/mocks/k8s.io/client-go/listers/core/v1" + fakenad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/nad" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) -func getCreationFakeOVSCommands(fexec *ovntest.FakeExec, mgtPort, mgtPortMAC, netName, nodeName string, mtu int) { +func getCreationFakeCommands(fexec *ovntest.FakeExec, mgtPort, mgtPortMAC, netName, nodeName string, mtu int) { fexec.AddFakeCmdsNoOutputNoError([]string{ "ovs-vsctl --timeout=15" + " -- --may-exist add-port br-int " + mgtPort + " -- set interface " + mgtPort + + fmt.Sprintf(" mac=\"%s\"", mgtPortMAC) + " type=internal mtu_request=" + fmt.Sprintf("%d", mtu) + " external-ids:iface-id=" + types.K8sPrefix + netName + "_" + nodeName, }) + fexec.AddFakeCmd(&ovntest.ExpectedCmd{ - Cmd: "ovs-vsctl --timeout=15 --if-exists get interface " + mgtPort + " mac_in_use", - Output: mgtPortMAC, - }) - fexec.AddFakeCmdsNoOutputNoError([]string{ - "ovs-vsctl --timeout=15 set interface " + mgtPort + " " + fmt.Sprintf("mac=%s", strings.ReplaceAll(mgtPortMAC, ":", "\\:")), + Cmd: "sysctl -w net.ipv4.conf." + mgtPort + ".forwarding=1", + Output: "net.ipv4.conf." 
+ mgtPort + ".forwarding = 1", }) } @@ -58,6 +65,13 @@ func getVRFCreationFakeOVSCommands(fexec *ovntest.FakeExec) { }) } +func getRPFilterLooseModeFakeCommands(fexec *ovntest.FakeExec) { + fexec.AddFakeCmd(&ovntest.ExpectedCmd{ + Cmd: "sysctl -w net.ipv4.conf.ovn-k8s-mp3.rp_filter=2", + Output: "net.ipv4.conf.ovn-k8s-mp3.rp_filter = 2", + }) +} + func getDeletionFakeOVSCommands(fexec *ovntest.FakeExec, mgtPort string) { fexec.AddFakeCmdsNoOutputNoError([]string{ "ovs-vsctl --timeout=15 -- --if-exists del-port br-int " + mgtPort, @@ -152,10 +166,6 @@ func setUpGatewayFakeOVSCommands(fexec *ovntest.FakeExec) { Cmd: "sysctl -w net.ipv4.conf.ovn-k8s-mp0.rp_filter=2", Output: "net.ipv4.conf.ovn-k8s-mp0.rp_filter = 2", }) - fexec.AddFakeCmd(&ovntest.ExpectedCmd{ - Cmd: "ovs-vsctl --timeout=15 --if-exists get interface patch-breth0_worker1-to-br-int ofport", - Output: "5", - }) fexec.AddFakeCmd(&ovntest.ExpectedCmd{ Cmd: "ovs-vsctl --timeout=15 --if-exists get interface breth0 ofport", Output: "7", @@ -173,6 +183,107 @@ func setUpUDNOpenflowManagerFakeOVSCommands(fexec *ovntest.FakeExec) { }) } +func setUpUDNOpenflowManagerCheckPortsFakeOVSCommands(fexec *ovntest.FakeExec) { + // Default and UDN patch port + fexec.AddFakeCmd(&ovntest.ExpectedCmd{ + Cmd: "ovs-vsctl --timeout=15 --if-exists get Interface patch-breth0_bluenet_worker1-to-br-int ofport", + Output: "15", + }) + fexec.AddFakeCmd(&ovntest.ExpectedCmd{ + Cmd: "ovs-vsctl --timeout=15 --if-exists get Interface patch-breth0_worker1-to-br-int ofport", + Output: "5", + }) + fexec.AddFakeCmd(&ovntest.ExpectedCmd{ + Cmd: "ovs-vsctl --timeout=15 --if-exists get interface breth0 ofport", + Output: "7", + }) + + // After simulated deletion. + fexec.AddFakeCmd(&ovntest.ExpectedCmd{ + Cmd: "ovs-vsctl --timeout=15 --if-exists get Interface patch-breth0_bluenet_worker1-to-br-int ofport", + Output: "", + }) + fexec.AddFakeCmd(&ovntest.ExpectedCmd{ + Cmd: "ovs-vsctl --timeout=15 --if-exists get Interface patch-breth0_worker1-to-br-int ofport", + Output: "5", + }) + fexec.AddFakeCmd(&ovntest.ExpectedCmd{ + Cmd: "ovs-vsctl --timeout=15 --if-exists get interface breth0 ofport", + Output: "7", + }) +} + +func openflowManagerCheckPorts(ofMgr *openflowManager) { + netConfigs, uplink, ofPortPhys := ofMgr.getDefaultBridgePortConfigurations() + sort.SliceStable(netConfigs, func(i, j int) bool { + return netConfigs[i].patchPort < netConfigs[j].patchPort + }) + checkPorts(netConfigs, uplink, ofPortPhys) +} + +func checkDefaultSvcIsolationOVSFlows(flows []string, defaultConfig *bridgeUDNConfiguration, ofPortHost, bridgeMAC string, svcCIDR *net.IPNet) { + By(fmt.Sprintf("Checking default service isolation flows for %s", svcCIDR.String())) + + var masqIP string + var masqSubnet string + var protoPrefix string + if utilnet.IsIPv4CIDR(svcCIDR) { + protoPrefix = "ip" + masqIP = config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String() + masqSubnet = config.Gateway.V4MasqueradeSubnet + } else { + protoPrefix = "ip6" + masqIP = config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String() + masqSubnet = config.Gateway.V6MasqueradeSubnet + } + + var nTable0DefaultFlows int + var nTable0UDNMasqFlows int + var nTable2Flows int + for _, flow := range flows { + if strings.Contains(flow, fmt.Sprintf("priority=500, in_port=%s, %s, %s_dst=%s, actions=ct(commit,zone=%d,nat(src=%s),table=2)", + ofPortHost, protoPrefix, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone, + masqIP)) { + nTable0DefaultFlows++ + } else if strings.Contains(flow, fmt.Sprintf("priority=550, in_port=%s, 
%s, %s_src=%s, %s_dst=%s, actions=ct(commit,zone=%d,table=2)", + ofPortHost, protoPrefix, protoPrefix, masqSubnet, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) { + nTable0UDNMasqFlows++ + } else if strings.Contains(flow, fmt.Sprintf("priority=100, table=2, actions=set_field:%s->eth_dst,output:%s", + bridgeMAC, defaultConfig.ofPortPatch)) { + nTable2Flows++ + } + } + + Expect(nTable0DefaultFlows).To(Equal(1)) + Expect(nTable0UDNMasqFlows).To(Equal(1)) + Expect(nTable2Flows).To(Equal(1)) +} + +func checkUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeUDNConfiguration, netName, ofPortHost, bridgeMAC string, svcCIDR *net.IPNet, expectedNFlows int) { + By(fmt.Sprintf("Checking UDN %s service isolation flows for %s; expected %d flows", + netName, svcCIDR.String(), expectedNFlows)) + + var mgmtMasqIP string + var protoPrefix string + if utilnet.IsIPv4CIDR(svcCIDR) { + mgmtMasqIP = netConfig.v4MasqIPs.ManagementPort.IP.String() + protoPrefix = "ip" + } else { + mgmtMasqIP = netConfig.v6MasqIPs.ManagementPort.IP.String() + protoPrefix = "ip6" + } + + var nFlows int + for _, flow := range flows { + if strings.Contains(flow, fmt.Sprintf("priority=200, table=2, %s, %s_src=%s, actions=set_field:%s->eth_dst,output:%s", + protoPrefix, protoPrefix, mgmtMasqIP, bridgeMAC, netConfig.ofPortPatch)) { + nFlows++ + } + } + + Expect(nFlows).To(Equal(expectedNFlows)) +} + var _ = Describe("UserDefinedNetworkGateway", func() { var ( netName = "bluenet" @@ -190,14 +301,17 @@ var _ = Describe("UserDefinedNetworkGateway", func() { wg sync.WaitGroup stopCh chan struct{} v4NodeSubnet = "100.128.0.0/24" - v6NodeSubnet = "ae70::66/112" + v6NodeSubnet = "ae70::/64" mgtPort = fmt.Sprintf("%s%s", types.K8sMgmtIntfNamePrefix, netID) v4NodeIP = "192.168.1.10/24" v6NodeIP = "fc00:f853:ccd:e793::3/64" ) BeforeEach(func() { // Restore global default values before each testcase - config.PrepareTestConfig() + err := config.PrepareTestConfig() + Expect(err).NotTo(HaveOccurred()) + config.OVNKubernetesFeature.EnableMultiNetwork = true + config.OVNKubernetesFeature.EnableNetworkSegmentation = true // Use a larger masq subnet to allow OF manager to allocate IPs for UDNs. 
config.Gateway.V6MasqueradeSubnet = "fd69::/112" config.Gateway.V4MasqueradeSubnet = "169.254.0.0/17" @@ -205,7 +319,6 @@ var _ = Describe("UserDefinedNetworkGateway", func() { fexec = ovntest.NewFakeExec() Expect(util.SetExec(fexec)).To(Succeed()) // Set up a fake k8sMgmt interface - var err error testNS, err = testutils.NewNS() Expect(err).NotTo(HaveOccurred()) err = testNS.Do(func(ns.NetNS) error { @@ -267,28 +380,29 @@ var _ = Describe("UserDefinedNetworkGateway", func() { }, } nad := ovntest.GenerateNAD(netName, "rednad", "greenamespace", - types.Layer3Topology, "100.128.0.0/16/24,ae70::66/60", types.NetworkRolePrimary) + types.Layer3Topology, "100.128.0.0/16/24,ae70::/60/64", types.NetworkRolePrimary) netInfo, err := util.ParseNADInfo(nad) Expect(err).NotTo(HaveOccurred()) udnGateway, err := NewUserDefinedNetworkGateway(netInfo, 3, node, factoryMock.NodeCoreInformer().Lister(), &kubeMock, vrf, nil, &gateway{}) Expect(err).NotTo(HaveOccurred()) - getCreationFakeOVSCommands(fexec, mgtPort, mgtPortMAC, netName, nodeName, netInfo.MTU()) + _, ipNet, err := net.ParseCIDR(v4NodeSubnet) + Expect(err).NotTo(HaveOccurred()) + mgtPortMAC = util.IPAddrToHWAddr(util.GetNodeManagementIfAddr(ipNet).IP).String() + getCreationFakeCommands(fexec, mgtPort, mgtPortMAC, netName, nodeName, netInfo.MTU()) nodeLister.On("Get", mock.AnythingOfType("string")).Return(node, nil) factoryMock.On("GetNode", "worker1").Return(node, nil) - cnode := node.DeepCopy() - cnode.Annotations[util.OvnNodeManagementPortMacAddresses] = `{"bluenet":"00:00:00:55:66:77"}` - kubeMock.On("UpdateNodeStatus", cnode).Return(nil) + err = testNS.Do(func(ns.NetNS) error { defer GinkgoRecover() - mpLink, _, err := udnGateway.addUDNManagementPort() + mpLink, err := udnGateway.addUDNManagementPort() Expect(err).NotTo(HaveOccurred()) Expect(mpLink).NotTo(BeNil()) Expect(udnGateway.addUDNManagementPortIPs(mpLink)).Should(Succeed()) exists, err := util.LinkAddrExist(mpLink, ovntest.MustParseIPNet("100.128.0.2/24")) Expect(err).NotTo(HaveOccurred()) Expect(exists).To(BeTrue()) - exists, err = util.LinkAddrExist(mpLink, ovntest.MustParseIPNet("ae70::2/112")) + exists, err = util.LinkAddrExist(mpLink, ovntest.MustParseIPNet("ae70::2/64")) Expect(err).NotTo(HaveOccurred()) Expect(exists).To(BeTrue()) return nil @@ -307,7 +421,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { }, } nad := ovntest.GenerateNAD(netName, "rednad", "greenamespace", - types.Layer3Topology, "100.128.0.0/16/24,ae70::66/60", types.NetworkRolePrimary) + types.Layer3Topology, "100.128.0.0/16/24,ae70::/60/64", types.NetworkRolePrimary) + // must be defined so that the primary user defined network can match the ip families of the underlying cluster + config.IPv4Mode = true + config.IPv6Mode = true netInfo, err := util.ParseNADInfo(nad) Expect(err).NotTo(HaveOccurred()) udnGateway, err := NewUserDefinedNetworkGateway(netInfo, 3, node, factoryMock.NodeCoreInformer().Lister(), @@ -332,26 +449,27 @@ var _ = Describe("UserDefinedNetworkGateway", func() { ObjectMeta: metav1.ObjectMeta{ Name: nodeName, Annotations: map[string]string{ - "k8s.ovn.org/network-ids": fmt.Sprintf("{\"%s\": \"%s\"}", netName, netID), + "k8s.ovn.org/network-ids": fmt.Sprintf("{\"%s\": \"%s\"}", netName, netID), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"%s\":[\"%s\", \"%s\"]}", netName, v4NodeSubnet, v6NodeSubnet), }, }, } nad := ovntest.GenerateNAD(netName, "rednad", "greenamespace", - types.Layer2Topology, "100.128.0.0/16,ae70::66/60", types.NetworkRolePrimary) + types.Layer2Topology, 
"100.128.0.0/16,ae70::/60", types.NetworkRolePrimary) netInfo, err := util.ParseNADInfo(nad) Expect(err).NotTo(HaveOccurred()) udnGateway, err := NewUserDefinedNetworkGateway(netInfo, 3, node, factoryMock.NodeCoreInformer().Lister(), &kubeMock, vrf, nil, &gateway{}) Expect(err).NotTo(HaveOccurred()) - getCreationFakeOVSCommands(fexec, mgtPort, mgtPortMAC, netName, nodeName, netInfo.MTU()) + _, ipNet, err := net.ParseCIDR(v4NodeSubnet) + Expect(err).NotTo(HaveOccurred()) + mgtPortMAC = util.IPAddrToHWAddr(util.GetNodeManagementIfAddr(ipNet).IP).String() + getCreationFakeCommands(fexec, mgtPort, mgtPortMAC, netName, nodeName, netInfo.MTU()) nodeLister.On("Get", mock.AnythingOfType("string")).Return(node, nil) factoryMock.On("GetNode", "worker1").Return(node, nil) - cnode := node.DeepCopy() - cnode.Annotations[util.OvnNodeManagementPortMacAddresses] = `{"bluenet":"00:00:00:55:66:77"}` - kubeMock.On("UpdateNodeStatus", cnode).Return(nil) err = testNS.Do(func(ns.NetNS) error { defer GinkgoRecover() - mpLink, _, err := udnGateway.addUDNManagementPort() + mpLink, err := udnGateway.addUDNManagementPort() Expect(err).NotTo(HaveOccurred()) Expect(mpLink).NotTo(BeNil()) Expect(udnGateway.addUDNManagementPortIPs(mpLink)).Should(Succeed()) @@ -371,12 +489,16 @@ var _ = Describe("UserDefinedNetworkGateway", func() { ObjectMeta: metav1.ObjectMeta{ Name: nodeName, Annotations: map[string]string{ - "k8s.ovn.org/network-ids": fmt.Sprintf("{\"%s\": \"%s\"}", netName, netID), + "k8s.ovn.org/network-ids": fmt.Sprintf("{\"%s\": \"%s\"}", netName, netID), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"%s\":[\"%s\", \"%s\"]}", netName, v4NodeSubnet, v6NodeSubnet), }, }, } nad := ovntest.GenerateNAD(netName, "rednad", "greenamespace", - types.Layer2Topology, "100.128.0.0/16,ae70::66/60", types.NetworkRolePrimary) + types.Layer2Topology, "100.128.0.0/16,ae70::/60", types.NetworkRolePrimary) + // must be defined so that the primary user defined network can match the ip families of the underlying cluster + config.IPv4Mode = true + config.IPv6Mode = true netInfo, err := util.ParseNADInfo(nad) Expect(err).NotTo(HaveOccurred()) udnGateway, err := NewUserDefinedNetworkGateway(netInfo, 3, node, factoryMock.NodeCoreInformer().Lister(), @@ -396,6 +518,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { Expect(fexec.CalledMatchesExpected()).To(BeTrue(), fexec.ErrorDesc) }) ovntest.OnSupportedPlatformsIt("should create and delete correct openflows on breth0 for a L3 user defined network", func() { + config.IPv4Mode = true config.IPv6Mode = true config.Gateway.Interface = "eth0" config.Gateway.NodeportEnable = true @@ -415,13 +538,18 @@ var _ = Describe("UserDefinedNetworkGateway", func() { }, } nad := ovntest.GenerateNAD(netName, "rednad", "greenamespace", - types.Layer3Topology, "100.128.0.0/16/24,ae70::66/60", types.NetworkRolePrimary) + types.Layer3Topology, "100.128.0.0/16/24,ae70::/60/64", types.NetworkRolePrimary) netInfo, err := util.ParseNADInfo(nad) Expect(err).NotTo(HaveOccurred()) setUpGatewayFakeOVSCommands(fexec) - getCreationFakeOVSCommands(fexec, mgtPort, mgtPortMAC, netName, nodeName, netInfo.MTU()) + _, ipNet, err := net.ParseCIDR(v4NodeSubnet) + Expect(err).NotTo(HaveOccurred()) + mgtPortMAC = util.IPAddrToHWAddr(util.GetNodeManagementIfAddr(ipNet).IP).String() + getCreationFakeCommands(fexec, mgtPort, mgtPortMAC, netName, nodeName, netInfo.MTU()) getVRFCreationFakeOVSCommands(fexec) + getRPFilterLooseModeFakeCommands(fexec) setUpUDNOpenflowManagerFakeOVSCommands(fexec) + 
setUpUDNOpenflowManagerCheckPortsFakeOVSCommands(fexec) getDeletionFakeOVSCommands(fexec, mgtPort) nodeLister.On("Get", mock.AnythingOfType("string")).Return(node, nil) kubeFakeClient := fake.NewSimpleClientset( @@ -430,7 +558,9 @@ var _ = Describe("UserDefinedNetworkGateway", func() { }, ) fakeClient := &util.OVNNodeClientset{ - KubeClient: kubeFakeClient, + KubeClient: kubeFakeClient, + NetworkAttchDefClient: nadfake.NewSimpleClientset(), + UserDefinedNetworkClient: udnfakeclient.NewSimpleClientset(), } stop := make(chan struct{}) @@ -442,9 +572,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { }() err = wf.Start() Expect(err).NotTo(HaveOccurred()) - cnode := node.DeepCopy() - cnode.Annotations[util.OvnNodeManagementPortMacAddresses] = `{"bluenet":"00:00:00:55:66:77"}` - kubeMock.On("UpdateNodeStatus", cnode).Return(nil) + + _, _ = util.SetFakeIPTablesHelpers() + nft := nodenft.SetFakeNFTablesHelper() + // Make a fake MgmtPortConfig with only the fields we care about fakeMgmtPortV4IPFamilyConfig := managementPortIPFamilyConfig{ ifAddr: ovntest.MustParseIPNet(v4NodeSubnet), @@ -459,9 +590,13 @@ var _ = Describe("UserDefinedNetworkGateway", func() { ifName: nodeName, link: nil, routerMAC: nil, + nft: nft, ipv4: &fakeMgmtPortV4IPFamilyConfig, ipv6: &fakeMgmtPortV6IPFamilyConfig, } + err = setupManagementPortNFTables(&fakeMgmtPortConfig) + Expect(err).NotTo(HaveOccurred()) + nodeAnnotatorMock := &kubemocks.Annotator{} nodeAnnotatorMock.On("Delete", mock.Anything).Return(nil) nodeAnnotatorMock.On("Set", mock.Anything, map[string]*util.L3GatewayConfig{ @@ -490,13 +625,18 @@ var _ = Describe("UserDefinedNetworkGateway", func() { defer GinkgoRecover() gatewayNextHops, gatewayIntf, err := getGatewayNextHops() Expect(err).NotTo(HaveOccurred()) + testNCM := &fakenad.FakeNetworkControllerManager{} + nadController, err := networkAttachDefController.NewNetAttachDefinitionController("test", testNCM, wf, nil) + Expect(err).NotTo(HaveOccurred()) // make preparations for creating openflow manager in DNCC which can be used for SNCC - localGw, err := newLocalGateway(nodeName, ovntest.MustParseIPNets(v4NodeSubnet, v6NodeSubnet), gatewayNextHops, - gatewayIntf, "", ifAddrs, nodeAnnotatorMock, &fakeMgmtPortConfig, &kubeMock, wf, rm) + localGw, err := newGateway(nodeName, ovntest.MustParseIPNets(v4NodeSubnet, v6NodeSubnet), gatewayNextHops, + gatewayIntf, "", ifAddrs, nodeAnnotatorMock, &fakeMgmtPortConfig, &kubeMock, wf, rm, nil, nadController, config.GatewayModeLocal) Expect(err).NotTo(HaveOccurred()) stop := make(chan struct{}) wg := &sync.WaitGroup{} + err = localGw.initFunc() + Expect(err).NotTo(HaveOccurred()) Expect(localGw.Init(stop, wg)).To(Succeed()) udnGateway, err := NewUserDefinedNetworkGateway(netInfo, 3, node, wf.NodeCoreInformer().Lister(), &kubeMock, vrf, ipRulesManager, localGw) @@ -509,7 +649,8 @@ var _ = Describe("UserDefinedNetworkGateway", func() { // FIXME: extract openflow manager func from the spawning of a go routine so it can be called directly below. 
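For context on the fake wiring in this test: `nodenft.SetFakeNFTablesHelper()` installs a `knftables.Fake`, so `setupManagementPortNFTables` runs against an in-memory ruleset rather than a real kernel table. A minimal, self-contained sketch of that pattern, assuming the `sigs.k8s.io/knftables` fake API; the chain name and rule mirror the constants in this diff, but the assertion style is illustrative, not code from the PR:

```go
package nftsketch

import (
	"context"
	"strings"
	"testing"

	"sigs.k8s.io/knftables"
)

func TestMgmtPortSNATChain(t *testing.T) {
	// Same setup as nodenft.SetFakeNFTablesHelper: an inet-family table
	// named "ovn-kubernetes" backed by an in-memory fake.
	nft := knftables.NewFake(knftables.InetFamily, "ovn-kubernetes")

	tx := nft.NewTransaction()
	tx.Add(&knftables.Table{})
	tx.Add(&knftables.Chain{
		Name:     "mgmtport-snat",
		Type:     knftables.PtrTo(knftables.NATType),
		Hook:     knftables.PtrTo(knftables.PostroutingHook),
		Priority: knftables.PtrTo(knftables.SNATPriority),
	})
	tx.Add(&knftables.Rule{
		Chain: "mgmtport-snat",
		Rule:  `oifname != "ovn-k8s-mp0" return`,
	})
	if err := nft.Run(context.TODO(), tx); err != nil {
		t.Fatal(err)
	}

	// Fake.Dump renders the ruleset as text, which suits substring
	// assertions like the ones checkMgmtPortTestNFTables makes via ListRules.
	if !strings.Contains(nft.Dump(), "mgmtport-snat") {
		t.Fatal("expected mgmtport-snat chain in ruleset")
	}
}
```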
localGw.openflowManager.syncFlows() flowMap := udnGateway.gateway.openflowManager.flowCache - Expect(len(flowMap["DEFAULT"])).To(Equal(45)) + + Expect(len(flowMap["DEFAULT"])).To(Equal(46)) Expect(udnGateway.masqCTMark).To(Equal(udnGateway.masqCTMark)) var udnFlows int for _, flows := range flowMap { @@ -526,25 +667,43 @@ var _ = Describe("UserDefinedNetworkGateway", func() { Expect(udnGateway.AddNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(len(flowMap["DEFAULT"])).To(Equal(59)) // 14 UDN Flows are added by default + Expect(len(flowMap["DEFAULT"])).To(Equal(64)) // 18 UDN Flows are added by default Expect(len(udnGateway.openflowManager.defaultBridge.netConfig)).To(Equal(2)) // default network + UDN network + defaultUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["default"] + bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["bluenet"] + bridgeMAC := udnGateway.openflowManager.defaultBridge.macAddress.String() + ofPortHost := udnGateway.openflowManager.defaultBridge.ofPortHost for _, flows := range flowMap { for _, flow := range flows { if strings.Contains(flow, fmt.Sprintf("0x%x", udnGateway.masqCTMark)) { // UDN Flow udnFlows++ - } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", udnGateway.openflowManager.defaultBridge.netConfig["bluenet"].ofPortPatch)) { + } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.ofPortPatch)) { udnFlows++ } } } Expect(udnFlows).To(Equal(14)) + openflowManagerCheckPorts(udnGateway.openflowManager) + + for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { + // Check flows for default network service CIDR. + checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + + // Expect exactly one flow per UDN for table 2 for service isolation. + checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", ofPortHost, bridgeMAC, svcCIDR, 1) + } + + // The second call to checkPorts() will return no ofPort for the UDN - simulating a deletion that already was + // processed by ovn-northd/ovn-controller. We should not be panicking on that. + // See setUpUDNOpenflowManagerCheckPortsFakeOVSCommands() for the order of ofPort query results. + openflowManagerCheckPorts(udnGateway.openflowManager) cnode := node.DeepCopy() kubeMock.On("UpdateNodeStatus", cnode).Return(nil) // check if network key gets deleted from annotation Expect(udnGateway.DelNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(len(flowMap["DEFAULT"])).To(Equal(45)) // only default network flows are present + Expect(len(flowMap["DEFAULT"])).To(Equal(46)) // only default network flows are present Expect(len(udnGateway.openflowManager.defaultBridge.netConfig)).To(Equal(1)) // default network only udnFlows = 0 for _, flows := range flowMap { @@ -556,12 +715,21 @@ var _ = Describe("UserDefinedNetworkGateway", func() { } } Expect(udnFlows).To(Equal(0)) + + for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { + // Check flows for default network service CIDR. + checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + + // Expect no more flows per UDN for table 2 for service isolation. 
+ checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", ofPortHost, bridgeMAC, svcCIDR, 0) + } return nil }) Expect(err).NotTo(HaveOccurred()) Expect(fexec.CalledMatchesExpected()).To(BeTrue(), fexec.ErrorDesc) }) ovntest.OnSupportedPlatformsIt("should create and delete correct openflows on breth0 for a L2 user defined network", func() { + config.IPv4Mode = true config.IPv6Mode = true config.Gateway.Interface = "eth0" config.Gateway.NodeportEnable = true @@ -571,7 +739,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { Name: nodeName, Annotations: map[string]string{ "k8s.ovn.org/network-ids": fmt.Sprintf("{\"%s\": \"%s\"}", netName, netID), - "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":[\"%s\"]}", v4NodeSubnet), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":[\"%s\"],\"%s\":[\"%s\", \"%s\"]}", v4NodeSubnet, netName, v4NodeSubnet, v6NodeSubnet), "k8s.ovn.org/host-cidrs": fmt.Sprintf("[\"%s\", \"%s\"]", v4NodeIP, v6NodeIP), }, }, @@ -581,13 +749,18 @@ var _ = Describe("UserDefinedNetworkGateway", func() { }, } nad := ovntest.GenerateNAD(netName, "rednad", "greenamespace", - types.Layer2Topology, "100.128.0.0/16,ae70::66/60", types.NetworkRolePrimary) + types.Layer2Topology, "100.128.0.0/16,ae70::/64", types.NetworkRolePrimary) netInfo, err := util.ParseNADInfo(nad) Expect(err).NotTo(HaveOccurred()) + _, ipNet, err := net.ParseCIDR(v4NodeSubnet) + Expect(err).NotTo(HaveOccurred()) + mgtPortMAC = util.IPAddrToHWAddr(util.GetNodeManagementIfAddr(ipNet).IP).String() setUpGatewayFakeOVSCommands(fexec) - getCreationFakeOVSCommands(fexec, mgtPort, mgtPortMAC, netName, nodeName, netInfo.MTU()) + getCreationFakeCommands(fexec, mgtPort, mgtPortMAC, netName, nodeName, netInfo.MTU()) getVRFCreationFakeOVSCommands(fexec) + getRPFilterLooseModeFakeCommands(fexec) setUpUDNOpenflowManagerFakeOVSCommands(fexec) + setUpUDNOpenflowManagerCheckPortsFakeOVSCommands(fexec) getDeletionFakeOVSCommands(fexec, mgtPort) nodeLister.On("Get", mock.AnythingOfType("string")).Return(node, nil) kubeFakeClient := fake.NewSimpleClientset( @@ -596,7 +769,9 @@ var _ = Describe("UserDefinedNetworkGateway", func() { }, ) fakeClient := &util.OVNNodeClientset{ - KubeClient: kubeFakeClient, + KubeClient: kubeFakeClient, + NetworkAttchDefClient: nadfake.NewSimpleClientset(), + UserDefinedNetworkClient: udnfakeclient.NewSimpleClientset(), } stop := make(chan struct{}) @@ -609,10 +784,12 @@ var _ = Describe("UserDefinedNetworkGateway", func() { wg.Wait() }() err = wf.Start() + + _, _ = util.SetFakeIPTablesHelpers() + nft := nodenft.SetFakeNFTablesHelper() + Expect(err).NotTo(HaveOccurred()) - cnode := node.DeepCopy() - cnode.Annotations[util.OvnNodeManagementPortMacAddresses] = `{"bluenet":"00:00:00:55:66:77"}` - kubeMock.On("UpdateNodeStatus", cnode).Return(nil) + // Make a fake MgmtPortConfig with only the fields we care about fakeMgmtPortV4IPFamilyConfig := managementPortIPFamilyConfig{ ifAddr: ovntest.MustParseIPNet(v4NodeSubnet), @@ -627,9 +804,13 @@ var _ = Describe("UserDefinedNetworkGateway", func() { ifName: nodeName, link: nil, routerMAC: nil, + nft: nft, ipv4: &fakeMgmtPortV4IPFamilyConfig, ipv6: &fakeMgmtPortV6IPFamilyConfig, } + err = setupManagementPortNFTables(&fakeMgmtPortConfig) + Expect(err).NotTo(HaveOccurred()) + nodeAnnotatorMock := &kubemocks.Annotator{} nodeAnnotatorMock.On("Delete", mock.Anything).Return(nil) nodeAnnotatorMock.On("Set", mock.Anything, map[string]*util.L3GatewayConfig{ @@ -658,13 +839,16 @@ var _ = Describe("UserDefinedNetworkGateway", func() { 
defer GinkgoRecover() gatewayNextHops, gatewayIntf, err := getGatewayNextHops() Expect(err).NotTo(HaveOccurred()) - + testNCM := &fakenad.FakeNetworkControllerManager{} + nadController, err := networkAttachDefController.NewNetAttachDefinitionController("test", testNCM, wf, nil) + Expect(err).NotTo(HaveOccurred()) // make preparations for creating openflow manager in DNCC which can be used for SNCC - localGw, err := newLocalGateway(nodeName, ovntest.MustParseIPNets(v4NodeSubnet, v6NodeSubnet), gatewayNextHops, - gatewayIntf, "", ifAddrs, nodeAnnotatorMock, &fakeMgmtPortConfig, &kubeMock, wf, rm) + localGw, err := newGateway(nodeName, ovntest.MustParseIPNets(v4NodeSubnet, v6NodeSubnet), gatewayNextHops, + gatewayIntf, "", ifAddrs, nodeAnnotatorMock, &fakeMgmtPortConfig, &kubeMock, wf, rm, nil, nadController, config.GatewayModeLocal) Expect(err).NotTo(HaveOccurred()) stop := make(chan struct{}) wg := &sync.WaitGroup{} + Expect(localGw.initFunc()).To(Succeed()) Expect(localGw.Init(stop, wg)).To(Succeed()) udnGateway, err := NewUserDefinedNetworkGateway(netInfo, 3, node, wf.NodeCoreInformer().Lister(), &kubeMock, vrf, ipRulesManager, localGw) @@ -677,7 +861,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { // FIXME: extract openflow manager func from the spawning of a go routine so it can be called directly below. localGw.openflowManager.syncFlows() flowMap := udnGateway.gateway.openflowManager.flowCache - Expect(len(flowMap["DEFAULT"])).To(Equal(45)) + Expect(len(flowMap["DEFAULT"])).To(Equal(46)) Expect(udnGateway.masqCTMark).To(Equal(udnGateway.masqCTMark)) var udnFlows int for _, flows := range flowMap { @@ -694,25 +878,43 @@ var _ = Describe("UserDefinedNetworkGateway", func() { Expect(udnGateway.AddNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(len(flowMap["DEFAULT"])).To(Equal(59)) // 14 UDN Flows are added by default + Expect(len(flowMap["DEFAULT"])).To(Equal(64)) // 18 UDN Flows are added by default Expect(len(udnGateway.openflowManager.defaultBridge.netConfig)).To(Equal(2)) // default network + UDN network + defaultUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["default"] + bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["bluenet"] + bridgeMAC := udnGateway.openflowManager.defaultBridge.macAddress.String() + ofPortHost := udnGateway.openflowManager.defaultBridge.ofPortHost for _, flows := range flowMap { for _, flow := range flows { if strings.Contains(flow, fmt.Sprintf("0x%x", udnGateway.masqCTMark)) { // UDN Flow udnFlows++ - } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", udnGateway.openflowManager.defaultBridge.netConfig["bluenet"].ofPortPatch)) { + } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.ofPortPatch)) { udnFlows++ } } } Expect(udnFlows).To(Equal(14)) + openflowManagerCheckPorts(udnGateway.openflowManager) + + for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { + // Check flows for default network service CIDR. + checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + + // Expect exactly one flow per UDN for tables 0 and 2 for service isolation. + checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", ofPortHost, bridgeMAC, svcCIDR, 1) + } + + // The second call to checkPorts() will return no ofPort for the UDN - simulating a deletion that already was + // processed by ovn-northd/ovn-controller. We should not be panicking on that. 
+ // See setUpUDNOpenflowManagerCheckPortsFakeOVSCommands() for the order of ofPort query results. + openflowManagerCheckPorts(udnGateway.openflowManager) cnode := node.DeepCopy() kubeMock.On("UpdateNodeStatus", cnode).Return(nil) // check if network key gets deleted from annotation Expect(udnGateway.DelNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(len(flowMap["DEFAULT"])).To(Equal(45)) // only default network flows are present + Expect(len(flowMap["DEFAULT"])).To(Equal(46)) // only default network flows are present Expect(len(udnGateway.openflowManager.defaultBridge.netConfig)).To(Equal(1)) // default network only udnFlows = 0 for _, flows := range flowMap { @@ -724,6 +926,14 @@ var _ = Describe("UserDefinedNetworkGateway", func() { } } Expect(udnFlows).To(Equal(0)) + + for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { + // Check flows for default network service CIDR. + checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + + // Expect no more flows per UDN for tables 0 and 2 for service isolation. + checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", ofPortHost, bridgeMAC, svcCIDR, 0) + } return nil }) Expect(err).NotTo(HaveOccurred()) @@ -736,10 +946,13 @@ var _ = Describe("UserDefinedNetworkGateway", func() { node := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, + Annotations: map[string]string{ + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"%s\":[\"%s\", \"%s\"]}", netName, v4NodeSubnet, v6NodeSubnet), + }, }, } nad := ovntest.GenerateNAD(netName, "rednad", "greenamespace", - types.Layer3Topology, "100.128.0.0/16/24,ae70::66/60", types.NetworkRolePrimary) + types.Layer3Topology, "100.128.0.0/16/24,ae70::/60/64", types.NetworkRolePrimary) netInfo, err := util.ParseNADInfo(nad) Expect(err).NotTo(HaveOccurred()) udnGateway, err := NewUserDefinedNetworkGateway(netInfo, 3, node, nil, nil, vrf, nil, &gateway{}) @@ -755,7 +968,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { routes, err := udnGateway.computeRoutesForUDN(vrfTableId, mplink) Expect(err).NotTo(HaveOccurred()) - Expect(len(routes)).To(Equal(3)) + Expect(len(routes)).To(Equal(7)) Expect(err).NotTo(HaveOccurred()) Expect(*routes[0].Dst).To(Equal(*ovntest.MustParseIPNet("172.16.1.0/24"))) // default service subnet Expect(routes[0].LinkIndex).To(Equal(bridgelink.Attrs().Index)) @@ -768,6 +981,26 @@ var _ = Describe("UserDefinedNetworkGateway", func() { Expect(err).NotTo(HaveOccurred()) Expect(*routes[2].Dst).To(Equal(*cidr)) Expect(routes[2].LinkIndex).To(Equal(mplink.Attrs().Index)) + + // IPv4 ETP=Local service masquerade IP route + Expect(*routes[3].Dst).To(Equal(*ovntest.MustParseIPNet("169.254.169.3/32"))) // ETP=Local svc masq IP + Expect(routes[3].LinkIndex).To(Equal(mplink.Attrs().Index)) + Expect(routes[3].Gw.Equal(ovntest.MustParseIP("100.128.0.1"))).To(BeTrue()) + + // IPv4 cluster subnet route + Expect(*routes[4].Dst).To(Equal(*ovntest.MustParseIPNet("100.128.0.0/16"))) // cluster subnet route + Expect(routes[4].LinkIndex).To(Equal(mplink.Attrs().Index)) + Expect(routes[4].Gw.Equal(ovntest.MustParseIP("100.128.0.1"))).To(BeTrue()) + + // IPv6 ETP=Local service masquerade IP route + Expect(*routes[5].Dst).To(Equal(*ovntest.MustParseIPNet("fd69::3/128"))) // ETP=Local svc masq IP + Expect(routes[5].LinkIndex).To(Equal(mplink.Attrs().Index)) + Expect(routes[5].Gw.Equal(ovntest.MustParseIP("ae70::1"))).To(BeTrue()) + + // IPv6 cluster subnet route + 
Expect(*routes[6].Dst).To(Equal(*ovntest.MustParseIPNet("ae70::/60"))) // cluster subnet route + Expect(routes[6].LinkIndex).To(Equal(mplink.Attrs().Index)) + Expect(routes[6].Gw.Equal(ovntest.MustParseIP("ae70::1"))).To(BeTrue()) return nil }) Expect(err).NotTo(HaveOccurred()) @@ -781,10 +1014,13 @@ var _ = Describe("UserDefinedNetworkGateway", func() { node := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, + Annotations: map[string]string{ + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"%s\":[\"%s\", \"%s\"]}", netName, v4NodeSubnet, v6NodeSubnet), + }, }, } nad := ovntest.GenerateNAD(netName, "rednad", "greenamespace", - types.Layer3Topology, "100.128.0.0/16/24,ae70::66/60", types.NetworkRolePrimary) + types.Layer3Topology, "100.128.0.0/16/24,ae70::/60/64", types.NetworkRolePrimary) netInfo, err := util.ParseNADInfo(nad) Expect(err).NotTo(HaveOccurred()) udnGateway, err := NewUserDefinedNetworkGateway(netInfo, 3, node, nil, nil, vrf, nil, &gateway{}) @@ -801,7 +1037,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { routes, err := udnGateway.computeRoutesForUDN(vrfTableId, mplink) Expect(err).NotTo(HaveOccurred()) - Expect(len(routes)).To(Equal(4)) + Expect(len(routes)).To(Equal(8)) Expect(err).NotTo(HaveOccurred()) // 1st and 2nd routes are the service routes from user-provided config value Expect(*routes[0].Dst).To(Equal(*config.Kubernetes.ServiceCIDRs[0])) @@ -816,6 +1052,37 @@ var _ = Describe("UserDefinedNetworkGateway", func() { Expect(err).NotTo(HaveOccurred()) Expect(*routes[3].Dst).To(Equal(*cidr)) Expect(routes[3].LinkIndex).To(Equal(mplink.Attrs().Index)) + + // IPv4 ETP=Local service masquerade IP route + Expect(*routes[4].Dst).To(Equal(*ovntest.MustParseIPNet("169.254.169.3/32"))) // ETP=Local svc masq IP + Expect(routes[4].LinkIndex).To(Equal(mplink.Attrs().Index)) + Expect(routes[4].Gw.Equal(ovntest.MustParseIP("100.128.0.1"))).To(BeTrue()) + + // IPv4 cluster subnet route + Expect(*routes[5].Dst).To(Equal(*ovntest.MustParseIPNet("100.128.0.0/16"))) // cluster subnet route + Expect(routes[5].LinkIndex).To(Equal(mplink.Attrs().Index)) + Expect(routes[5].Gw.Equal(ovntest.MustParseIP("100.128.0.1"))).To(BeTrue()) + + // IPv6 ETP=Local service masquerade IP route + Expect(*routes[6].Dst).To(Equal(*ovntest.MustParseIPNet("fd69::3/128"))) // ETP=Local svc masq IP + Expect(routes[6].LinkIndex).To(Equal(mplink.Attrs().Index)) + Expect(routes[6].Gw.Equal(ovntest.MustParseIP("ae70::1"))).To(BeTrue()) + + // IPv6 cluster subnet route + Expect(*routes[7].Dst).To(Equal(*ovntest.MustParseIPNet("ae70::/60"))) // cluster subnet route + Expect(routes[7].LinkIndex).To(Equal(mplink.Attrs().Index)) + Expect(routes[7].Gw.Equal(ovntest.MustParseIP("ae70::1"))).To(BeTrue()) + return nil + }) + Expect(err).NotTo(HaveOccurred()) + Expect(fexec.CalledMatchesExpected()).To(BeTrue(), fexec.ErrorDesc) + }) + ovntest.OnSupportedPlatformsIt("should set rp filter to loose mode for management port interface", func() { + getRPFilterLooseModeFakeCommands(fexec) + err := testNS.Do(func(ns.NetNS) error { + defer GinkgoRecover() + err := addRPFilterLooseModeForManagementPort(mgtPort) + Expect(err).NotTo(HaveOccurred()) return nil }) Expect(err).NotTo(HaveOccurred()) @@ -828,6 +1095,7 @@ func TestConstructUDNVRFIPRules(t *testing.T) { priority int family int table int + mark int dst net.IPNet } type testConfig struct { @@ -848,6 +1116,12 @@ func TestConstructUDNVRFIPRules(t *testing.T) { desc: "v4 rule test", vrftableID: 1007, expectedRules: []testRule{ + { + priority:
UDNMasqueradeIPRulePriority, + family: netlink.FAMILY_V4, + table: 1007, + mark: 0x1003, + }, { priority: UDNMasqueradeIPRulePriority, family: netlink.FAMILY_V4, @@ -861,6 +1135,12 @@ func TestConstructUDNVRFIPRules(t *testing.T) { desc: "v6 rule test", vrftableID: 1009, expectedRules: []testRule{ + { + priority: UDNMasqueradeIPRulePriority, + family: netlink.FAMILY_V6, + table: 1009, + mark: 0x1003, + }, { priority: UDNMasqueradeIPRulePriority, family: netlink.FAMILY_V6, @@ -874,12 +1154,24 @@ func TestConstructUDNVRFIPRules(t *testing.T) { desc: "dualstack rule test", vrftableID: 1010, expectedRules: []testRule{ + { + priority: UDNMasqueradeIPRulePriority, + family: netlink.FAMILY_V4, + table: 1010, + mark: 0x1003, + }, { priority: UDNMasqueradeIPRulePriority, family: netlink.FAMILY_V4, table: 1010, dst: *util.GetIPNetFullMaskFromIP(ovntest.MustParseIP("169.254.0.16")), }, + { + priority: UDNMasqueradeIPRulePriority, + family: netlink.FAMILY_V6, + table: 1010, + mark: 0x1003, + }, { priority: UDNMasqueradeIPRulePriority, family: netlink.FAMILY_V6, @@ -898,8 +1190,18 @@ func TestConstructUDNVRFIPRules(t *testing.T) { g := gomega.NewWithT(t) config.IPv4Mode = test.v4mode config.IPv6Mode = test.v6mode + cidr := "" + if config.IPv4Mode { + cidr = "100.128.0.0/16/24" + + } + if config.IPv4Mode && config.IPv6Mode { + cidr += ",ae70::/60/64" + } else if config.IPv6Mode { + cidr = "ae70::/60/64" + } nad := ovntest.GenerateNAD("bluenet", "rednad", "greenamespace", - types.Layer3Topology, "100.128.0.0/16/24,ae70::66/60", types.NetworkRolePrimary) + types.Layer3Topology, cidr, types.NetworkRolePrimary) netInfo, err := util.ParseNADInfo(nad) g.Expect(err).NotTo(HaveOccurred()) udnGateway, err := NewUserDefinedNetworkGateway(netInfo, 3, nil, nil, nil, nil, nil, &gateway{}) @@ -910,7 +1212,11 @@ func TestConstructUDNVRFIPRules(t *testing.T) { g.Expect(rule.Priority).To(gomega.Equal(test.expectedRules[i].priority)) g.Expect(rule.Table).To(gomega.Equal(test.expectedRules[i].table)) g.Expect(rule.Family).To(gomega.Equal(test.expectedRules[i].family)) - g.Expect(*rule.Dst).To(gomega.Equal(test.expectedRules[i].dst)) + if rule.Dst != nil { + g.Expect(*rule.Dst).To(gomega.Equal(test.expectedRules[i].dst)) + } else { + g.Expect(rule.Mark).To(gomega.Equal(test.expectedRules[i].mark)) + } } }) } diff --git a/go-controller/pkg/node/healthcheck_node_test.go b/go-controller/pkg/node/healthcheck_node_test.go index 16021e5815..e86bb06d9d 100644 --- a/go-controller/pkg/node/healthcheck_node_test.go +++ b/go-controller/pkg/node/healthcheck_node_test.go @@ -7,8 +7,9 @@ import ( "sync" "time" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -85,6 +86,7 @@ var _ = Describe("Node healthcheck tests", func() { ) BeforeEach(func() { + config.PrepareTestConfig() stopCh = make(chan struct{}) wg = &sync.WaitGroup{} os.Setenv("POD_NAME", ovnkNodePodName) diff --git a/go-controller/pkg/node/iprulemanager/ip_rule_manager_suite_test.go b/go-controller/pkg/node/iprulemanager/ip_rule_manager_suite_test.go index c85780ffde..54f15792b4 100644 --- a/go-controller/pkg/node/iprulemanager/ip_rule_manager_suite_test.go +++ b/go-controller/pkg/node/iprulemanager/ip_rule_manager_suite_test.go @@ -3,7 +3,7 @@ package iprulemanager import ( "testing" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/node/iprulemanager/ip_rule_manager_test.go b/go-controller/pkg/node/iprulemanager/ip_rule_manager_test.go index cf71a35c38..88e1a550de 100644 --- a/go-controller/pkg/node/iprulemanager/ip_rule_manager_test.go +++ b/go-controller/pkg/node/iprulemanager/ip_rule_manager_test.go @@ -3,7 +3,6 @@ package iprulemanager import ( "fmt" "net" - "os" "runtime" "sync" "time" @@ -11,9 +10,11 @@ import ( "github.com/containernetworking/plugins/pkg/ns" "github.com/containernetworking/plugins/pkg/testutils" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/vishvananda/netlink" + + ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" ) const oneSec = 1 * time.Second @@ -36,7 +37,7 @@ var _ = ginkgo.XDescribe("IP Rule Manager", func() { ruleWithSrc.Src = testIPNet defer ginkgo.GinkgoRecover() - if os.Getenv("NOROOT") == "TRUE" { + if ovntest.NoRoot() { ginkgo.Skip("Test requires root privileges") } diff --git a/go-controller/pkg/node/iptables/iptables_manager_test.go b/go-controller/pkg/node/iptables/iptables_manager_test.go index ccdbb96097..b5750c31de 100644 --- a/go-controller/pkg/node/iptables/iptables_manager_test.go +++ b/go-controller/pkg/node/iptables/iptables_manager_test.go @@ -1,7 +1,6 @@ package iptables import ( - "os" "os/exec" "runtime" "sync" @@ -11,8 +10,10 @@ import ( "github.com/containernetworking/plugins/pkg/ns" "github.com/containernetworking/plugins/pkg/testutils" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + + ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" ) const ( @@ -28,7 +29,7 @@ var _ = ginkgo.Describe("IPTables Manager", func() { ginkgo.BeforeEach(func() { defer ginkgo.GinkgoRecover() - if os.Getenv("NOROOT") == "TRUE" { + if ovntest.NoRoot() { ginkgo.Skip("Test requires root privileges") } if !commandExists("iptables") { diff --git a/go-controller/pkg/node/iptables/iptables_suite_test.go b/go-controller/pkg/node/iptables/iptables_suite_test.go index 49742b6106..7e85a2e59b 100644 --- a/go-controller/pkg/node/iptables/iptables_suite_test.go +++ b/go-controller/pkg/node/iptables/iptables_suite_test.go @@ -1,7 +1,7 @@ package iptables import ( - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "testing" ) diff --git a/go-controller/pkg/node/linkmanager/link_manager_suite_test.go b/go-controller/pkg/node/linkmanager/link_manager_suite_test.go index a781c98f36..252eaec5db 100644 --- a/go-controller/pkg/node/linkmanager/link_manager_suite_test.go +++ b/go-controller/pkg/node/linkmanager/link_manager_suite_test.go @@ -3,7 +3,7 
@@ package linkmanager import ( "testing" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/node/linkmanager/link_network_manager_test.go b/go-controller/pkg/node/linkmanager/link_network_manager_test.go index dd5bf418c8..0c039ee672 100644 --- a/go-controller/pkg/node/linkmanager/link_network_manager_test.go +++ b/go-controller/pkg/node/linkmanager/link_network_manager_test.go @@ -7,8 +7,8 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/mocks" - "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/extensions/table" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" "github.com/stretchr/testify/mock" "github.com/vishvananda/netlink" @@ -112,7 +112,7 @@ var _ = ginkgo.Describe("Link network manager", func() { // // There maybe a discrepancy between existingLinkAddr link addresses and existingStore link addresses because a link may // have addresses that aren't managed. Link1 is always the target of the new addresses to add. - table.DescribeTable("Add address to link1", func(addrToAdd netlink.Addr, existingLinkAddr []netlink.Addr, existingStore map[string][]netlink.Addr, + ginkgo.DescribeTable("Add address to link1", func(addrToAdd netlink.Addr, existingLinkAddr []netlink.Addr, existingStore map[string][]netlink.Addr, v4Enabled, v6Enabled, expectErr, expectAddAddrCalled bool) { expectedAddr := addrToAdd @@ -138,17 +138,17 @@ var _ = ginkgo.Describe("Link network manager", func() { if expectAddAddrCalled { gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLink1Mock, &expectedAddr)).Should(gomega.BeTrue()) } - }, table.Entry("Add valid IPv4 address with empty store", + }, ginkgo.Entry("Add valid IPv4 address with empty store", newNetlinkAddrWithIndexSet(v4CIDR1, linkName1), []netlink.Addr{}, map[string][]netlink.Addr{}, v4Enabled, v6Disabled, noErr, addrAddCalled), - table.Entry("Doesn't add IPv4 address when IPv4 is disabled and IPv6 enabled", + ginkgo.Entry("Doesn't add IPv4 address when IPv4 is disabled and IPv6 enabled", newNetlinkAddrWithIndexSet(v4CIDR1, linkName1), []netlink.Addr{}, map[string][]netlink.Addr{}, v4Disable, v6Enabled, Err, addrAddNotCalled), - table.Entry("Add IPv4 address when it exists in store but not applied", + ginkgo.Entry("Add IPv4 address when it exists in store but not applied", newNetlinkAddrWithIndexSet(v4CIDR1, linkName1), []netlink.Addr{}, map[string][]netlink.Addr{ linkName1: {newNetlinkAddrWithIndexSet(v4CIDR1, linkName1)}, }, v4Enabled, v6Disabled, noErr, addrAddNotCalled), - table.Entry("Doesn't attempt to add an IPv4 address when already applied and exists in store", + ginkgo.Entry("Doesn't attempt to add an IPv4 address when already applied and exists in store", newNetlinkAddrWithIndexSet(v4CIDR1, linkName1), []netlink.Addr{newNetlinkAddrWithIndexSet(v4CIDR1, linkName1)}, map[string][]netlink.Addr{ linkName1: {newNetlinkAddrWithIndexSet(v4CIDR1, linkName1)}, @@ -162,7 +162,7 @@ var _ = ginkgo.Describe("Link network manager", func() { // // There maybe a discrepancy between existingLinkAddr link addresses and existingStore link addresses because a link may // have addresses that aren't managed. Link1 is always the target of the new addresses to delete. 
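The `link_network_manager_test.go` hunks here (and continuing below) are a mechanical Ginkgo v1-to-v2 migration: `DescribeTable` and `Entry` moved from the `extensions/table` package into the core `ginkgo/v2` package, so only the import and the qualifier change. A minimal before/after sketch of the pattern, with illustrative values:

```go
// Before (Ginkgo v1): table tests lived in an extensions package.
//
//	import "github.com/onsi/ginkgo/extensions/table"
//
//	table.DescribeTable("adds", func(a, b, sum int) {
//		gomega.Expect(a + b).To(gomega.Equal(sum))
//	},
//		table.Entry("1+1", 1, 1, 2),
//	)

// After (Ginkgo v2): DescribeTable and Entry are part of the core package.
package sample

import (
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

var _ = ginkgo.DescribeTable("adds", func(a, b, sum int) {
	gomega.Expect(a + b).To(gomega.Equal(sum))
},
	ginkgo.Entry("1+1", 1, 1, 2),
)
```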
- table.DescribeTable("Delete address from link1", func(addrToDel netlink.Addr, existingLinkAddr []netlink.Addr, existingStore map[string][]netlink.Addr, + ginkgo.DescribeTable("Delete address from link1", func(addrToDel netlink.Addr, existingLinkAddr []netlink.Addr, existingStore map[string][]netlink.Addr, v4Enabled, v6Enabled, expectErr, expectDelAddrCalled bool) { expectedAddr := addrToDel @@ -188,7 +188,7 @@ var _ = ginkgo.Describe("Link network manager", func() { if expectDelAddrCalled { gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrDel", nlLink1Mock, &expectedAddr)).Should(gomega.BeTrue()) } - }, table.Entry("Deletes an IPv4 address which exists in store and is applied", + }, ginkgo.Entry("Deletes an IPv4 address which exists in store and is applied", newNetlinkAddrWithIndexSet(v4CIDR1, linkName1), []netlink.Addr{ newNetlinkAddrWithIndexSet(v4CIDR1, linkName1), newNetlinkAddrWithIndexSet(v4CIDR2, linkName2), @@ -196,7 +196,7 @@ var _ = ginkgo.Describe("Link network manager", func() { linkName1: {newNetlinkAddrWithIndexSet(v4CIDR1, linkName1)}, linkName2: {newNetlinkAddrWithIndexSet(v4CIDR2, linkName2)}, }, v4Enabled, v6Disabled, noErr, addrDelCalled), - table.Entry("Doesn't attempt to delete an IPv4 address which exists in store but not applied", + ginkgo.Entry("Doesn't attempt to delete an IPv4 address which exists in store but not applied", newNetlinkAddrWithIndexSet(v4CIDR1, linkName1), []netlink.Addr{ newNetlinkAddrWithIndexSet(v4CIDR1, linkName1), // different address than the one attempted to be deleted newNetlinkAddrWithIndexSet(v4CIDR2, linkName2), @@ -204,7 +204,7 @@ var _ = ginkgo.Describe("Link network manager", func() { linkName1: {newNetlinkAddrWithIndexSet(v4CIDR1, linkName1)}, linkName2: {newNetlinkAddrWithIndexSet(v4CIDR2, linkName2)}, }, v4Enabled, v6Disabled, noErr, addrDelNotCalled), - table.Entry("Doesn't delete IPv4 address when IPv4 is disabled and IPv6 enabled", + ginkgo.Entry("Doesn't delete IPv4 address when IPv4 is disabled and IPv6 enabled", newNetlinkAddrWithIndexSet(v4CIDR1, linkName1), []netlink.Addr{}, map[string][]netlink.Addr{}, v4Disable, v6Enabled, Err, addrDelNotCalled), ) }) diff --git a/go-controller/pkg/node/management-port-dpu.go b/go-controller/pkg/node/management-port-dpu.go index 9009ab3194..13850af74c 100644 --- a/go-controller/pkg/node/management-port-dpu.go +++ b/go-controller/pkg/node/management-port-dpu.go @@ -113,10 +113,6 @@ func (mp *managementPortRepresentor) Create(_ *routemanager.Controller, node *v1 link: link, } - mgmtPortMac := util.IPAddrToHWAddr(util.GetNodeManagementIfAddr(mp.hostSubnets[0]).IP) - if err := util.UpdateNodeManagementPortMACAddressesWithRetry(node, nodeLister, kubeInterface, mgmtPortMac, types.DefaultNetworkName); err != nil { - return nil, err - } waiter.AddWait(managementPortReady, nil) return mpcfg, nil } diff --git a/go-controller/pkg/node/management-port.go b/go-controller/pkg/node/management-port.go index c716e59ddd..ada7342be3 100644 --- a/go-controller/pkg/node/management-port.go +++ b/go-controller/pkg/node/management-port.go @@ -86,41 +86,39 @@ func (mp *managementPort) Create(routeManager *routemanager.Controller, node *v1 } } + var err error + var macAddr net.HardwareAddr + // find suitable MAC address + // check node annotation first, to ensure we are not picking a new MAC when one was already configured + if macAddr, err = util.ParseNodeManagementPortMACAddresses(node, types.DefaultNetworkName); err != nil && !util.IsAnnotationNotSetError(err) { + return nil, err + } + if len(macAddr) 
== 0 { + // calculate mac from subnets + if len(mp.hostSubnets) == 0 { + return nil, fmt.Errorf("cannot determine subnets while configuring management port for network: %s", types.DefaultNetworkName) + } + macAddr = util.IPAddrToHWAddr(util.GetNodeManagementIfAddr(mp.hostSubnets[0]).IP) + } + // Create a OVS internal interface. legacyMgmtIntfName := util.GetLegacyK8sMgmtIntfName(mp.nodeName) stdout, stderr, err := util.RunOVSVsctl( "--", "--if-exists", "del-port", "br-int", legacyMgmtIntfName, "--", "--may-exist", "add-port", "br-int", types.K8sMgmtIntfName, - "--", "set", "interface", types.K8sMgmtIntfName, + "--", "set", "interface", types.K8sMgmtIntfName, fmt.Sprintf("mac=\"%s\"", macAddr.String()), "type=internal", "mtu_request="+fmt.Sprintf("%d", config.Default.MTU), "external-ids:iface-id="+types.K8sPrefix+mp.nodeName) if err != nil { klog.Errorf("Failed to add port to br-int, stdout: %q, stderr: %q, error: %v", stdout, stderr, err) return nil, err } - macAddress, err := util.GetOVSPortMACAddress(types.K8sMgmtIntfName) - if err != nil { - klog.Errorf("Failed to get management port MAC address: %v", err) - return nil, err - } - // persist the MAC address so that upon node reboot we get back the same mac address. - _, stderr, err = util.RunOVSVsctl("set", "interface", types.K8sMgmtIntfName, - fmt.Sprintf("mac=%s", strings.ReplaceAll(macAddress.String(), ":", "\\:"))) - if err != nil { - klog.Errorf("Failed to persist MAC address %q for %q: stderr:%s (%v)", macAddress.String(), - types.K8sMgmtIntfName, stderr, err) - return nil, err - } cfg, err := createPlatformManagementPort(routeManager, types.K8sMgmtIntfName, mp.hostSubnets) if err != nil { return nil, err } - if err := util.UpdateNodeManagementPortMACAddressesWithRetry(node, nodeLister, kubeInterface, macAddress, types.DefaultNetworkName); err != nil { - return nil, err - } - waiter.AddWait(managementPortReady, nil) return cfg, nil } diff --git a/go-controller/pkg/node/management-port_dpu_test.go b/go-controller/pkg/node/management-port_dpu_test.go index b0c1fee842..ef88ddd2f2 100644 --- a/go-controller/pkg/node/management-port_dpu_test.go +++ b/go-controller/pkg/node/management-port_dpu_test.go @@ -4,8 +4,9 @@ import ( "fmt" "net" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" egressfirewallfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/fake" egressipv1fake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/fake" diff --git a/go-controller/pkg/node/management-port_linux.go b/go-controller/pkg/node/management-port_linux.go index 6b917a5279..a79beb8cad 100644 --- a/go-controller/pkg/node/management-port_linux.go +++ b/go-controller/pkg/node/management-port_linux.go @@ -4,13 +4,14 @@ package node import ( + "context" "fmt" "net" - "strings" "time" "github.com/coreos/go-iptables/iptables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -18,14 +19,33 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" + "sigs.k8s.io/knftables" ) const ( + // The legacy iptables management port chain iptableMgmPortChain = "OVN-KUBE-SNAT-MGMTPORT" + + // The "mgmtport-snat" chain contains the rules to SNAT traffic sent to the + // management port (except for `externalTrafficPolicy: Local` traffic, where + // the source IP must be preserved). + nftablesMgmtPortChain = "mgmtport-snat" + + // "mgmtport-no-snat-nodeports" is a set containing protocol / nodePort tuples + // indicating traffic that should not be SNATted when passing through the + // management port because it is addressed to an `externalTrafficPolicy: Local` + // NodePort. + nftablesMgmtPortNoSNATNodePorts = "mgmtport-no-snat-nodeports" + + // "mgmtport-no-snat-services-v4" and "mgmtport-no-snat-services-v6" are sets + // containing loadBalancerIP / protocol / port tuples indicating traffic that + // should not be SNATted when passing through the management port because it is + // addressed to an `externalTrafficPolicy: Local` load balancer IP. 
+ nftablesMgmtPortNoSNATServicesV4 = "mgmtport-no-snat-services-v4" + nftablesMgmtPortNoSNATServicesV6 = "mgmtport-no-snat-services-v6" ) type managementPortIPFamilyConfig struct { - ipt util.IPTablesHelper allSubnets []*net.IPNet ifAddr *net.IPNet gwIP net.IP @@ -35,6 +55,7 @@ type managementPortConfig struct { ifName string link netlink.Link routerMAC net.HardwareAddr + nft knftables.Interface ipv4 *managementPortIPFamilyConfig ipv6 *managementPortIPFamilyConfig @@ -70,11 +91,6 @@ func newManagementPortIPFamilyConfig(hostSubnet *net.IPNet, isIPv6 bool) (*manag cfg.allSubnets = append(cfg.allSubnets, masqueradeSubnet) } - if utilnet.IsIPv6CIDR(cfg.ifAddr) { - cfg.ipt, err = util.GetIPTablesHelper(iptables.ProtocolIPv6) - } else { - cfg.ipt, err = util.GetIPTablesHelper(iptables.ProtocolIPv4) - } if err != nil { return nil, err } @@ -83,10 +99,14 @@ func newManagementPortIPFamilyConfig(hostSubnet *net.IPNet, isIPv6 bool) (*manag } func newManagementPortConfig(interfaceName string, hostSubnets []*net.IPNet) (*managementPortConfig, error) { - var err error + nft, err := nodenft.GetNFTablesHelper() + if err != nil { + return nil, err + } mpcfg := &managementPortConfig{ ifName: interfaceName, + nft: nft, } if mpcfg.link, err = util.LinkSetUp(mpcfg.ifName); err != nil { return nil, err @@ -137,7 +157,7 @@ func newManagementPortConfig(interfaceName string, hostSubnets []*net.IPNet) (*m return mpcfg, nil } -func tearDownInterfaceIPConfig(link netlink.Link, ipt4, ipt6 util.IPTablesHelper) error { +func tearDownManagementPortConfig(link netlink.Link, nft knftables.Interface) error { if err := util.LinkAddrFlush(link); err != nil { return err } @@ -145,63 +165,45 @@ func tearDownInterfaceIPConfig(link netlink.Link, ipt4, ipt6 util.IPTablesHelper if err := util.LinkRoutesDel(link, nil); err != nil { return err } - if ipt4 != nil { - if err := ipt4.ClearChain("nat", iptableMgmPortChain); err != nil { - return fmt.Errorf("could not clear the iptables chain for management port: %v", err) - } - } - if ipt6 != nil { - if err := ipt6.ClearChain("nat", iptableMgmPortChain); err != nil { - return fmt.Errorf("could not clear the iptables chain for management port: %v", err) - } + tx := nft.NewTransaction() + // Delete would return an error if we tried to delete a chain that didn't exist, so + // we do an Add first (which is a no-op if the chain already exists) and then Delete. + tx.Add(&knftables.Chain{ + Name: nftablesMgmtPortChain, + }) + tx.Delete(&knftables.Chain{ + Name: nftablesMgmtPortChain, + }) + err := nft.Run(context.TODO(), tx) + if err != nil && !knftables.IsNotFound(err) { + return fmt.Errorf("could not clear the nftables chain for management port: %v", err) } return nil } -func tearDownManagementPortConfig(mpcfg *managementPortConfig) error { - // for the initial setup we need to start from the clean slate, so flush - // all (non-LL) addresses on this link, routes through this link, and - // finally any IPtable rules for this link. 
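Two knftables transaction idioms recur in the rewritten management-port code: the Add-then-Delete trick that makes chain teardown idempotent, and populating sets with concatenated-type keys. A minimal sketch assuming the chain and set names declared in this diff; the service address and port are made-up examples, not values from the PR:

```go
package nftsketch

import (
	"context"

	"sigs.k8s.io/knftables"
)

func teardown(nft knftables.Interface) error {
	tx := nft.NewTransaction()
	// Add is a no-op if the chain already exists, so the subsequent Delete
	// cannot fail with "not found" just because the chain was never created.
	tx.Add(&knftables.Chain{Name: "mgmtport-snat"})
	tx.Delete(&knftables.Chain{Name: "mgmtport-snat"})
	return nft.Run(context.TODO(), tx)
}

func addNoSNATService(nft knftables.Interface) error {
	tx := nft.NewTransaction()
	// Element keys are concatenations matching the set's declared type,
	// here "ipv4_addr . inet_proto . inet_service".
	tx.Add(&knftables.Element{
		Set: "mgmtport-no-snat-services-v4",
		Key: []string{"192.0.2.10", "tcp", "8080"},
	})
	return nft.Run(context.TODO(), tx)
}
```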
- var ipt4, ipt6 util.IPTablesHelper - - if mpcfg.ipv4 != nil { - ipt4 = mpcfg.ipv4.ipt - } - if mpcfg.ipv6 != nil { - ipt6 = mpcfg.ipv6.ipt - } - return tearDownInterfaceIPConfig(mpcfg.link, ipt4, ipt6) -} - func setupManagementPortIPFamilyConfig(routeManager *routemanager.Controller, mpcfg *managementPortConfig, cfg *managementPortIPFamilyConfig) ([]string, error) { var warnings []string var err error var exists bool - if exists, err = util.LinkAddrExist(mpcfg.link, cfg.ifAddr); err == nil && !exists { - // we should log this so that one can debug as to why addresses are - // disappearing - warnings = append(warnings, fmt.Sprintf("missing IP address %s on the interface %s, adding it...", - cfg.ifAddr, mpcfg.ifName)) - err = util.LinkAddrAdd(mpcfg.link, cfg.ifAddr, 0, 0, 0) - } + // synchronize IP addresses, removing undesired addresses + // should also remove routes specifying those undesired addresses + err = util.SyncAddresses(mpcfg.link, []*net.IPNet{cfg.ifAddr}) if err != nil { return warnings, err } + // now check for addition of any missing routes for _, subnet := range cfg.allSubnets { - exists, err = util.LinkRouteExists(mpcfg.link, cfg.gwIP, subnet) - if err != nil { - return warnings, err + route, err := util.LinkRouteGetByDstAndGw(mpcfg.link, cfg.gwIP, subnet) + if err != nil || route == nil { + // we need to warn so that it can be debugged as to why routes are incorrect + warnings = append(warnings, fmt.Sprintf("missing or unable to find route entry for subnet %s "+ + "via gateway %s on link %v with MTU: %d", subnet, cfg.gwIP, mpcfg.ifName, config.Default.RoutableMTU)) } - if exists { - continue - } - // we need to warn so that it can be debugged as to why routes are disappearing - warnings = append(warnings, fmt.Sprintf("missing route entry for subnet %s via gateway %s on link %v", - subnet, cfg.gwIP, mpcfg.ifName)) + subnetCopy := *subnet err = routeManager.Add(netlink.Route{LinkIndex: mpcfg.link.Attrs().Index, Gw: cfg.gwIP, Dst: &subnetCopy, MTU: config.Default.RoutableMTU}) if err != nil { @@ -237,7 +239,7 @@ func setupManagementPortIPFamilyConfig(routeManager *routemanager.Controller, mp } // IPv6 forwarding is enabled globally - if mpcfg.ipv4 != nil && cfg == mpcfg.ipv4 { + if cfg == mpcfg.ipv4 { stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net.ipv4.conf.%s.forwarding=1", types.K8sMgmtIntfName)) if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.forwarding = 1", types.K8sMgmtIntfName) { return warnings, fmt.Errorf("could not set the correct forwarding value for interface %s: stdout: %v, stderr: %v, err: %v", @@ -245,38 +247,6 @@ func setupManagementPortIPFamilyConfig(routeManager *routemanager.Controller, mp } } - if _, err = cfg.ipt.List("nat", iptableMgmPortChain); err != nil { - warnings = append(warnings, fmt.Sprintf("missing iptables chain %s in the nat table, adding it", - iptableMgmPortChain)) - err = cfg.ipt.NewChain("nat", iptableMgmPortChain) - } - if err != nil { - return warnings, fmt.Errorf("could not create iptables nat chain %q for management port: %v", - iptableMgmPortChain, err) - } - rule := []string{"-o", mpcfg.ifName, "-j", iptableMgmPortChain} - if exists, err = cfg.ipt.Exists("nat", "POSTROUTING", rule...); err == nil && !exists { - warnings = append(warnings, fmt.Sprintf("missing iptables postrouting nat chain %s, adding it", - iptableMgmPortChain)) - err = cfg.ipt.Insert("nat", "POSTROUTING", 1, rule...) 
- } - if err != nil { - return warnings, fmt.Errorf("could not insert iptables rule %q for management port: %v", - strings.Join(rule, " "), err) - } - rule = []string{"-o", mpcfg.ifName, "-j", "SNAT", "--to-source", cfg.ifAddr.IP.String(), - "-m", "comment", "--comment", "OVN SNAT to Management Port"} - if exists, err = cfg.ipt.Exists("nat", iptableMgmPortChain, rule...); err == nil && !exists { - warnings = append(warnings, fmt.Sprintf("missing management port nat rule in chain %s, adding it", - iptableMgmPortChain)) - // NOTE: SNAT to mp0 rule should be the last in the chain, so append it - err = cfg.ipt.Append("nat", iptableMgmPortChain, rule...) - } - if err != nil { - return warnings, fmt.Errorf("could not insert iptable rule %q for management port: %v", - strings.Join(rule, " "), err) - } - return warnings, nil } @@ -296,6 +266,99 @@ func setupManagementPortConfig(routeManager *routemanager.Controller, cfg *manag return allWarnings, err } +func setupManagementPortNFTables(cfg *managementPortConfig) error { + counterIfDebug := "" + if config.Logging.Level > 4 { + counterIfDebug = "counter" + } + + tx := cfg.nft.NewTransaction() + tx.Add(&knftables.Chain{ + Name: nftablesMgmtPortChain, + Comment: knftables.PtrTo("OVN SNAT to Management Port"), + + Type: knftables.PtrTo(knftables.NATType), + Hook: knftables.PtrTo(knftables.PostroutingHook), + Priority: knftables.PtrTo(knftables.SNATPriority), + }) + tx.Add(&knftables.Set{ + Name: nftablesMgmtPortNoSNATNodePorts, + Comment: knftables.PtrTo("NodePorts not subject to management port SNAT"), + Type: "inet_proto . inet_service", + }) + tx.Add(&knftables.Set{ + Name: nftablesMgmtPortNoSNATServicesV4, + Comment: knftables.PtrTo("eTP:Local short-circuit not subject to management port SNAT (IPv4)"), + Type: "ipv4_addr . inet_proto . inet_service", + }) + tx.Add(&knftables.Set{ + Name: nftablesMgmtPortNoSNATServicesV6, + Comment: knftables.PtrTo("eTP:Local short-circuit not subject to management port SNAT (IPv6)"), + Type: "ipv6_addr . inet_proto . inet_service", + }) + + tx.Flush(&knftables.Chain{ + Name: nftablesMgmtPortChain, + }) + tx.Add(&knftables.Rule{ + Chain: nftablesMgmtPortChain, + Rule: knftables.Concat( + "oifname", "!=", fmt.Sprintf("%q", cfg.ifName), + "return", + ), + }) + tx.Add(&knftables.Rule{ + Chain: nftablesMgmtPortChain, + Rule: knftables.Concat( + "meta l4proto", ".", "th dport", "@", nftablesMgmtPortNoSNATNodePorts, + counterIfDebug, + "return", + ), + }) + + if cfg.ipv4 != nil { + tx.Add(&knftables.Rule{ + Chain: nftablesMgmtPortChain, + Rule: knftables.Concat( + "ip daddr . meta l4proto . th dport", "@", nftablesMgmtPortNoSNATServicesV4, + counterIfDebug, + "return", + ), + }) + tx.Add(&knftables.Rule{ + Chain: nftablesMgmtPortChain, + Rule: knftables.Concat( + counterIfDebug, + "snat ip to", cfg.ipv4.ifAddr.IP, + ), + }) + } + + if cfg.ipv6 != nil { + tx.Add(&knftables.Rule{ + Chain: nftablesMgmtPortChain, + Rule: knftables.Concat( + "ip6 daddr . meta l4proto . th dport", "@", nftablesMgmtPortNoSNATServicesV6, + counterIfDebug, + "return", + ), + }) + tx.Add(&knftables.Rule{ + Chain: nftablesMgmtPortChain, + Rule: knftables.Concat( + counterIfDebug, + "snat ip6 to", cfg.ipv6.ifAddr.IP, + ), + }) + } + + err := cfg.nft.Run(context.TODO(), tx) + if err != nil { + return fmt.Errorf("could not update nftables rule for management port: %v", err) + } + return nil +} + // createPlatformManagementPort creates a management port attached to the node switch // that lets the node access its pods via their private IP address. 
This is used // for health checking and other management tasks. @@ -307,40 +370,18 @@ func createPlatformManagementPort(routeManager *routemanager.Controller, interfa return nil, err } - if err = tearDownManagementPortConfig(cfg); err != nil { + if _, err = setupManagementPortConfig(routeManager, cfg); err != nil { return nil, err } - if _, err = setupManagementPortConfig(routeManager, cfg); err != nil { + if err = setupManagementPortNFTables(cfg); err != nil { return nil, err } + DelLegacyMgtPortIptRules() return cfg, nil } -func getIPTablesForHostSubnets(hostSubnets []*net.IPNet) (util.IPTablesHelper, util.IPTablesHelper, error) { - var ipt4, ipt6 util.IPTablesHelper - var err error - - for _, hostSubnet := range hostSubnets { - if utilnet.IsIPv6CIDR(hostSubnet) { - if ipt6 != nil { - continue - } - ipt6, err = util.GetIPTablesHelper(iptables.ProtocolIPv6) - } else { - if ipt4 != nil { - continue - } - ipt4, err = util.GetIPTablesHelper(iptables.ProtocolIPv4) - } - if err != nil { - return nil, nil, err - } - } - return ipt4, ipt6, nil -} - // syncMgmtPortInterface verifies if no other interface configured as management port. This may happen if another // interface had been used as management port or Node was running in different mode. // If old management port is found, its IP configuration is flushed and interface renamed. @@ -421,12 +462,12 @@ func unconfigureMgmtNetdevicePort(hostSubnets []*net.IPNet, mgmtPortName string) } klog.Infof("Found existing management interface. Unconfiguring it") - ipt4, ipt6, err := getIPTablesForHostSubnets(hostSubnets) + nft, err := nodenft.GetNFTablesHelper() if err != nil { - return fmt.Errorf("failed to get iptables: %v", err) + return fmt.Errorf("failed to get nftables: %v", err) } - if err := tearDownInterfaceIPConfig(link, ipt4, ipt6); err != nil { + if err := tearDownManagementPortConfig(link, nft); err != nil { return fmt.Errorf("teardown failed: %v", err) } @@ -457,8 +498,9 @@ func unconfigureMgmtNetdevicePort(hostSubnets []*net.IPNet, mgmtPortName string) return nil } -// DelMgtPortIptRules delete all the iptable rules for the management port. -func DelMgtPortIptRules() { +// DelLegacyMgtPortIptRules deletes legacy iptables rules for the management port; this is +// only used for cleaning up stale rules when upgrading, and can eventually be removed. +func DelLegacyMgtPortIptRules() { // Clean up all iptables and ip6tables remnants that may be left around ipt, err := util.GetIPTablesHelper(iptables.ProtocolIPv4) if err != nil { @@ -480,7 +522,7 @@ func DelMgtPortIptRules() { // checks to make sure that following configurations are present on the k8s node // 1. route entries to cluster CIDR and service CIDR through management port // 2. ARP entry for the node subnet's gateway ip -// 3. IPtables chain and rule for SNATing packets entering the logical topology +// 3. 
nftables rules for SNATing packets entering the logical topology func checkManagementPortHealth(routeManager *routemanager.Controller, cfg *managementPortConfig) { warnings, err := setupManagementPortConfig(routeManager, cfg) for _, warning := range warnings { @@ -489,4 +531,7 @@ func checkManagementPortHealth(routeManager *routemanager.Controller, cfg *manag if err != nil { klog.Errorf(err.Error()) } + if err = setupManagementPortNFTables(cfg); err != nil { + klog.Errorf(err.Error()) + } } diff --git a/go-controller/pkg/node/management-port_linux_test.go b/go-controller/pkg/node/management-port_linux_test.go index fc85bd2e74..c97a73c991 100644 --- a/go-controller/pkg/node/management-port_linux_test.go +++ b/go-controller/pkg/node/management-port_linux_test.go @@ -17,7 +17,6 @@ import ( "github.com/containernetworking/plugins/pkg/ns" "github.com/containernetworking/plugins/pkg/testutils" - "github.com/coreos/go-iptables/iptables" "github.com/stretchr/testify/mock" "github.com/urfave/cli/v2" "github.com/vishvananda/netlink" @@ -28,6 +27,7 @@ import ( egressservicefake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/fake" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" mocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/mocks/github.com/vishvananda/netlink" @@ -39,7 +39,7 @@ import ( "k8s.io/client-go/kubernetes/fake" anpfake "sigs.k8s.io/network-policy-api/pkg/client/clientset/versioned/fake" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) @@ -59,8 +59,7 @@ func createTempFile(name string) (string, error) { } type managementPortTestConfig struct { - family int - protocol iptables.Protocol + family int clusterCIDR string serviceCIDR string @@ -84,51 +83,42 @@ func (mptc *managementPortTestConfig) GetMgtPortAddr() *netlink.Addr { return mgtPortAddrs } -// setMgmtPortTestIptables sets up fake IPV4 and IPV6 IPTables helpers with needed chains for management port -func setMgmtPortTestIptables(configs []managementPortTestConfig) (util.IPTablesHelper, util.IPTablesHelper) { - var err error - iptV4, iptV6 := util.SetFakeIPTablesHelpers() +// checkMgmtPortTestNFTables validates nftables rules for management port +func checkMgmtPortTestNFTables(configs []managementPortTestConfig, mgmtPortName string) { + nft, err := nodenft.GetNFTablesHelper() + Expect(err).NotTo(HaveOccurred()) + rules, err := nft.ListRules(context.Background(), nftablesMgmtPortChain) + Expect(err).NotTo(HaveOccurred()) + + var returnRule, snatV4Rule, snatV6Rule string + var wantReturnRule, wantSNATV4Rule, wantSNATV6Rule bool + + returnRule = fmt.Sprintf("oifname != %q return", mgmtPortName) + wantReturnRule = true + for _, cfg := range configs { - if cfg.protocol == iptables.ProtocolIPv4 { - err = iptV4.NewChain("nat", "POSTROUTING") - Expect(err).NotTo(HaveOccurred()) - err = iptV4.NewChain("nat", "OVN-KUBE-SNAT-MGMTPORT") - Expect(err).NotTo(HaveOccurred()) + if cfg.family == netlink.FAMILY_V4 { + snatV4Rule = "snat ip to " + cfg.expectedManagementPortIP + wantSNATV4Rule = true } else { - err = iptV6.NewChain("nat", "POSTROUTING") - Expect(err).NotTo(HaveOccurred()) - err = iptV6.NewChain("nat", "OVN-KUBE-SNAT-MGMTPORT") - Expect(err).NotTo(HaveOccurred()) + snatV6Rule = "snat ip6 to " + cfg.expectedManagementPortIP + wantSNATV6Rule = true } } - return iptV4, iptV6 -} -// checkMgmtPortTestIptables validates Iptables rules for management port -func checkMgmtPortTestIptables(configs []managementPortTestConfig, mgmtPortName string, - fakeIpv4, fakeIpv6 *util.FakeIPTables) { - var err error - for _, cfg := range configs { - expectedTables := map[string]util.FakeTable{ - "nat": { - "POSTROUTING": []string{ - "-o " + mgmtPortName + " -j OVN-KUBE-SNAT-MGMTPORT", - }, - "OVN-KUBE-SNAT-MGMTPORT": []string{ - "-o " + mgmtPortName + " -j SNAT --to-source " + cfg.expectedManagementPortIP + " -m comment --comment OVN SNAT to Management Port", - }, - }, - "filter": {}, - "mangle": {}, - } - if cfg.protocol == iptables.ProtocolIPv4 { - err = fakeIpv4.MatchState(expectedTables, nil) - Expect(err).NotTo(HaveOccurred()) - } else { - err = fakeIpv6.MatchState(expectedTables, nil) - Expect(err).NotTo(HaveOccurred()) + for _, rule := range rules { + if wantReturnRule && strings.Contains(rule.Rule, returnRule) { + wantReturnRule = false + } else if wantSNATV4Rule && strings.Contains(rule.Rule, snatV4Rule) { + wantSNATV4Rule = false + } else if wantSNATV6Rule && strings.Contains(rule.Rule, snatV6Rule) { + wantSNATV6Rule = false } } + + Expect(wantReturnRule).To(BeFalse(), "did not find rule with %q", returnRule) + Expect(wantSNATV4Rule).To(BeFalse(), "did not find rule with %q", snatV4Rule) + Expect(wantSNATV6Rule).To(BeFalse(), "did not find rule with %q", snatV6Rule) } // checkMgmtTestPortIpsAndRoutes checks IPs and Routes of the management port @@ -193,15 +183,19 @@ func checkMgmtTestPortIpsAndRoutes(configs []managementPortTestConfig, mgmtPortN } func testManagementPort(ctx *cli.Context, fexec *ovntest.FakeExec, testNS ns.NetNS, - configs 
[]managementPortTestConfig, expectedLRPMAC string) { + configs []managementPortTestConfig, expectedLRPMAC string, legacy bool) { const ( nodeName string = "node1" - mgtPortMAC string = "00:00:00:55:66:77" mgtPort string = types.K8sMgmtIntfName legacyMgtPort string = types.K8sPrefix + nodeName mtu string = "1400" ) + mgmtPortMAC := util.IPAddrToHWAddr(net.ParseIP(configs[0].expectedManagementPortIP)) + if legacy { + mgmtPortMAC, _ = net.ParseMAC("00:11:22:33:44:55") + } + // generic setup fexec.AddFakeCmd(&ovntest.ExpectedCmd{ Cmd: "ovs-vsctl --timeout=15 --no-headings --data bare --format csv --columns type,name find Interface name=" + mgtPort, @@ -212,14 +206,7 @@ func testManagementPort(ctx *cli.Context, fexec *ovntest.FakeExec, testNS ns.Net Output: "internal," + mgtPort + "_0", }) fexec.AddFakeCmdsNoOutputNoError([]string{ - "ovs-vsctl --timeout=15 -- --if-exists del-port br-int " + legacyMgtPort + " -- --may-exist add-port br-int " + mgtPort + " -- set interface " + mgtPort + " type=internal mtu_request=" + mtu + " external-ids:iface-id=" + legacyMgtPort, - }) - fexec.AddFakeCmd(&ovntest.ExpectedCmd{ - Cmd: "ovs-vsctl --timeout=15 --if-exists get interface " + mgtPort + " mac_in_use", - Output: mgtPortMAC, - }) - fexec.AddFakeCmdsNoOutputNoError([]string{ - "ovs-vsctl --timeout=15 set interface " + mgtPort + " " + fmt.Sprintf("mac=%s", strings.ReplaceAll(mgtPortMAC, ":", "\\:")), + "ovs-vsctl --timeout=15 -- --if-exists del-port br-int " + legacyMgtPort + " -- --may-exist add-port br-int " + mgtPort + " -- set interface " + mgtPort + " mac=\"" + mgmtPortMAC.String() + "\"" + " type=internal mtu_request=" + mtu + " external-ids:iface-id=" + legacyMgtPort, }) for _, cfg := range configs { // We do not enable per-interface forwarding for IPv6 @@ -250,12 +237,17 @@ func testManagementPort(ctx *cli.Context, fexec *ovntest.FakeExec, testNS ns.Net mgtPortAddrs[i] = cfg.GetMgtPortAddr() } - iptV4, iptV6 := setMgmtPortTestIptables(configs) + nodenft.SetFakeNFTablesHelper() existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{ Name: nodeName, }} + if legacy { + existingNode.Annotations = map[string]string{ + util.OvnNodeManagementPortMacAddresses: fmt.Sprintf("{\"default\":%q}", mgmtPortMAC)} + } + fakeClient := fake.NewSimpleClientset(&v1.NodeList{ Items: []v1.Node{existingNode}, }) @@ -265,7 +257,9 @@ func testManagementPort(ctx *cli.Context, fexec *ovntest.FakeExec, testNS ns.Net _, err = config.InitConfig(ctx, fexec, nil) Expect(err).NotTo(HaveOccurred()) - kubeInterface := &kube.KubeOVN{Kube: kube.Kube{KClient: fakeClient}, ANPClient: anpfake.NewSimpleClientset(), EIPClient: egressipv1fake.NewSimpleClientset(), EgressFirewallClient: &egressfirewallfake.Clientset{}, EgressServiceClient: &egressservicefake.Clientset{}} + kubeInterface := &kube.KubeOVN{Kube: kube.Kube{KClient: fakeClient}, ANPClient: anpfake.NewSimpleClientset(), + EIPClient: egressipv1fake.NewSimpleClientset(), EgressFirewallClient: &egressfirewallfake.Clientset{}, + EgressServiceClient: &egressservicefake.Clientset{}} nodeAnnotator := kube.NewNodeAnnotator(kubeInterface, existingNode.Name) watchFactory, err := factory.NewNodeWatchFactory(fakeNodeClient, nodeName) Expect(err).NotTo(HaveOccurred()) @@ -304,14 +298,7 @@ func testManagementPort(ctx *cli.Context, fexec *ovntest.FakeExec, testNS ns.Net err = waiter.Wait() Expect(err).NotTo(HaveOccurred()) - checkMgmtPortTestIptables(configs, mgtPort, iptV4.(*util.FakeIPTables), iptV6.(*util.FakeIPTables)) - - updatedNode, err := fakeClient.CoreV1().Nodes().Get(context.TODO(), 
nodeName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - macFromAnnotation, err := util.ParseNodeManagementPortMACAddresses(updatedNode, types.DefaultNetworkName) - Expect(err).NotTo(HaveOccurred()) - Expect(macFromAnnotation.String()).To(Equal(mgtPortMAC)) + checkMgmtPortTestNFTables(configs, mgtPort) Expect(fexec.CalledMatchesExpected()).To(BeTrue(), fexec.ErrorDesc) } @@ -411,14 +398,6 @@ func testManagementPortDPU(ctx *cli.Context, fexec *ovntest.FakeExec, testNS ns. Expect(err).NotTo(HaveOccurred()) err = waiter.Wait() Expect(err).NotTo(HaveOccurred()) - - updatedNode, err := fakeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - macFromAnnotation, err := util.ParseNodeManagementPortMACAddresses(updatedNode, types.DefaultNetworkName) - Expect(err).NotTo(HaveOccurred()) - Expect(macFromAnnotation.String()).To(Equal(mgtPortMAC)) - Expect(fexec.CalledMatchesExpected()).To(BeTrue(), fexec.ErrorDesc) } @@ -457,7 +436,7 @@ func testManagementPortDPUHost(ctx *cli.Context, fexec *ovntest.FakeExec, testNS mgtPortAddrs[i] = cfg.GetMgtPortAddr() } - iptV4, iptV6 := setMgmtPortTestIptables(configs) + nodenft.SetFakeNFTablesHelper() _, err = config.InitConfig(ctx, fexec, nil) Expect(err).NotTo(HaveOccurred()) @@ -494,7 +473,7 @@ func testManagementPortDPUHost(ctx *cli.Context, fexec *ovntest.FakeExec, testNS }) Expect(err).NotTo(HaveOccurred()) - checkMgmtPortTestIptables(configs, mgtPort, iptV4.(*util.FakeIPTables), iptV6.(*util.FakeIPTables)) + checkMgmtPortTestNFTables(configs, mgtPort) Expect(fexec.CalledMatchesExpected()).To(BeTrue(), fexec.ErrorDesc) } @@ -822,8 +801,7 @@ var _ = Describe("Management Port Operations", func() { testManagementPort(ctx, fexec, testNS, []managementPortTestConfig{ { - family: netlink.FAMILY_V4, - protocol: iptables.ProtocolIPv4, + family: netlink.FAMILY_V4, clusterCIDR: v4clusterCIDR, nodeSubnet: v4nodeSubnet, @@ -831,7 +809,30 @@ var _ = Describe("Management Port Operations", func() { expectedManagementPortIP: v4mgtPortIP, expectedGatewayIP: v4gwIP, }, - }, v4lrpMAC) + }, v4lrpMAC, false) + return nil + } + err := app.Run([]string{ + app.Name, + "--cluster-subnets=" + v4clusterCIDR, + }) + Expect(err).NotTo(HaveOccurred()) + }) + + ovntest.OnSupportedPlatformsIt("sets up the management port for IPv4 clusters with legacy annotation", func() { + app.Action = func(ctx *cli.Context) error { + testManagementPort(ctx, fexec, testNS, + []managementPortTestConfig{ + { + family: netlink.FAMILY_V4, + + clusterCIDR: v4clusterCIDR, + nodeSubnet: v4nodeSubnet, + + expectedManagementPortIP: v4mgtPortIP, + expectedGatewayIP: v4gwIP, + }, + }, v4lrpMAC, true) return nil } err := app.Run([]string{ @@ -846,8 +847,7 @@ var _ = Describe("Management Port Operations", func() { testManagementPort(ctx, fexec, testNS, []managementPortTestConfig{ { - family: netlink.FAMILY_V6, - protocol: iptables.ProtocolIPv6, + family: netlink.FAMILY_V6, clusterCIDR: v6clusterCIDR, serviceCIDR: v6serviceCIDR, @@ -856,7 +856,7 @@ var _ = Describe("Management Port Operations", func() { expectedManagementPortIP: v6mgtPortIP, expectedGatewayIP: v6gwIP, }, - }, v6lrpMAC) + }, v6lrpMAC, false) return nil } err := app.Run([]string{ @@ -872,8 +872,7 @@ var _ = Describe("Management Port Operations", func() { testManagementPort(ctx, fexec, testNS, []managementPortTestConfig{ { - family: netlink.FAMILY_V4, - protocol: iptables.ProtocolIPv4, + family: netlink.FAMILY_V4, clusterCIDR: v4clusterCIDR, serviceCIDR: v4serviceCIDR, @@ -883,8 
+882,7 @@ var _ = Describe("Management Port Operations", func() { expectedGatewayIP: v4gwIP, }, { - family: netlink.FAMILY_V6, - protocol: iptables.ProtocolIPv6, + family: netlink.FAMILY_V6, clusterCIDR: v6clusterCIDR, serviceCIDR: v6serviceCIDR, @@ -893,7 +891,7 @@ var _ = Describe("Management Port Operations", func() { expectedManagementPortIP: v6mgtPortIP, expectedGatewayIP: v6gwIP, }, - }, v4lrpMAC) + }, v4lrpMAC, false) return nil } err := app.Run([]string{ @@ -923,8 +921,7 @@ var _ = Describe("Management Port Operations", func() { testManagementPortDPU(ctx, fexec, testNS, []managementPortTestConfig{ { - family: netlink.FAMILY_V4, - protocol: iptables.ProtocolIPv4, + family: netlink.FAMILY_V4, clusterCIDR: v4clusterCIDR, serviceCIDR: v4serviceCIDR, @@ -963,8 +960,7 @@ var _ = Describe("Management Port Operations", func() { testManagementPortDPUHost(ctx, fexec, testNS, []managementPortTestConfig{ { - family: netlink.FAMILY_V4, - protocol: iptables.ProtocolIPv4, + family: netlink.FAMILY_V4, clusterCIDR: v4clusterCIDR, serviceCIDR: v4serviceCIDR, diff --git a/go-controller/pkg/node/management-port_test.go b/go-controller/pkg/node/management-port_test.go index 5e2ff9896a..f0b9ee7e5d 100644 --- a/go-controller/pkg/node/management-port_test.go +++ b/go-controller/pkg/node/management-port_test.go @@ -3,7 +3,7 @@ package node import ( "reflect" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" diff --git a/go-controller/pkg/node/nftables/helpers.go b/go-controller/pkg/node/nftables/helpers.go new file mode 100644 index 0000000000..3e8ed11ff4 --- /dev/null +++ b/go-controller/pkg/node/nftables/helpers.go @@ -0,0 +1,57 @@ +//go:build linux +// +build linux + +package nftables + +import ( + "context" + + "sigs.k8s.io/knftables" +) + +const OVNKubernetesNFTablesName = "ovn-kubernetes" + +var nftHelper knftables.Interface + +// SetFakeNFTablesHelper creates a fake knftables.Interface +func SetFakeNFTablesHelper() *knftables.Fake { + fake := knftables.NewFake(knftables.InetFamily, OVNKubernetesNFTablesName) + tx := fake.NewTransaction() + tx.Add(&knftables.Table{}) + _ = fake.Run(context.TODO(), tx) + + nftHelper = fake + return fake +} + +// GetNFTablesHelper returns a knftables.Interface. If SetFakeNFTablesHelper has not been +// called, it will create a "real" knftables.Interface +func GetNFTablesHelper() (knftables.Interface, error) { + if nftHelper == nil { + nft, err := knftables.New(knftables.InetFamily, OVNKubernetesNFTablesName) + if err != nil { + return nil, err + } + tx := nft.NewTransaction() + tx.Add(&knftables.Table{}) + err = nft.Run(context.TODO(), tx) + if err != nil { + return nil, err + } + + nftHelper = nft + } + return nftHelper, nil +} + +// CleanupNFTables cleans up all ovn-kubernetes NFTables data, on ovnkube-node daemonset +// deletion. 
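
An aside for reviewers: a minimal sketch of how node code is expected to consume these helpers (illustrative only; the chain name and rule text below are hypothetical and not part of this PR):

```go
// Sketch, assuming the helpers above: get (or lazily create) the shared
// "inet ovn-kubernetes" table helper, then apply changes transactionally.
nft, err := nodenft.GetNFTablesHelper()
if err != nil {
	return err
}
tx := nft.NewTransaction()
tx.Add(&knftables.Chain{Name: "example-chain"}) // hypothetical chain
tx.Add(&knftables.Rule{
	Chain: "example-chain",
	Rule:  "ip saddr 10.244.0.0/16 counter", // hypothetical rule
})
if err := nft.Run(context.TODO(), tx); err != nil {
	return err
}
```

In unit tests, calling SetFakeNFTablesHelper() first swaps in a knftables.Fake, so the same code path runs without touching the kernel.
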
+func CleanupNFTables() { + nft, _ := GetNFTablesHelper() + if nft == nil { + return + } + tx := nft.NewTransaction() + tx.Delete(&knftables.Table{}) + _ = nft.Run(context.Background(), tx) +} diff --git a/go-controller/pkg/node/nftables/testing.go b/go-controller/pkg/node/nftables/testing.go new file mode 100644 index 0000000000..ad377caeca --- /dev/null +++ b/go-controller/pkg/node/nftables/testing.go @@ -0,0 +1,38 @@ +//go:build linux +// +build linux + +package nftables + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// MatchNFTRules checks that the expected nftables rules match the actual ones, ignoring +// order. +func MatchNFTRules(expected, actual string) error { + expectedSet := sets.New(strings.Split(expected, "\n")...) + actualSet := sets.New(strings.Split(actual, "\n")...) + + // ignore blank lines + expectedSet.Delete("") + actualSet.Delete("") + + missing := expectedSet.Difference(actualSet) + extra := actualSet.Difference(expectedSet) + + if len(missing) == 0 && len(extra) == 0 { + return nil + } + + msg := "nftables rule mismatch:" + if len(missing) > 0 { + msg += fmt.Sprintf("\nMissing rules: %v\n", missing.UnsortedList()) + } + if len(extra) > 0 { + msg += fmt.Sprintf("\nExtra rules: %v\n", extra.UnsortedList()) + } + return fmt.Errorf("%s", msg) +} diff --git a/go-controller/pkg/node/nftables/util.go b/go-controller/pkg/node/nftables/util.go new file mode 100644 index 0000000000..1a4a3bdd21 --- /dev/null +++ b/go-controller/pkg/node/nftables/util.go @@ -0,0 +1,43 @@ +//go:build linux +// +build linux + +package nftables + +import ( + "context" + + "sigs.k8s.io/knftables" +) + +// UpdateNFTElements adds/updates the given nftables set/map elements. The set or map must +// already exist. +func UpdateNFTElements(elements []*knftables.Element) error { + nft, err := GetNFTablesHelper() + if err != nil { + return err + } + + tx := nft.NewTransaction() + for _, elem := range elements { + tx.Add(elem) + } + return nft.Run(context.TODO(), tx) +} + +// DeleteNFTElements deletes the given nftables set/map elements. The set or map must +// exist, but if the elements aren't already in the set/map, no error is returned. +func DeleteNFTElements(elements []*knftables.Element) error { + nft, err := GetNFTablesHelper() + if err != nil { + return err + } + + tx := nft.NewTransaction() + for _, elem := range elements { + // We add+delete the elements, rather than just deleting them, so that if + // they weren't already in the set/map, we won't get an error on delete. 
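+		// (a plain delete of an element that is absent would fail the whole
+		// transaction with ENOENT; add-then-delete makes the delete idempotent)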
+		tx.Add(elem)
+		tx.Delete(elem)
+	}
+	return nft.Run(context.TODO(), tx)
+}
diff --git a/go-controller/pkg/node/node_ip_handler_linux.go b/go-controller/pkg/node/node_ip_handler_linux.go
index 2508d14ce8..0afd0dbed9 100644
--- a/go-controller/pkg/node/node_ip_handler_linux.go
+++ b/go-controller/pkg/node/node_ip_handler_linux.go
@@ -115,6 +115,10 @@ func (c *addressManager) ListAddresses() []net.IP {
 type subscribeFn func() (bool, chan netlink.AddrUpdate, error)
 
 func (c *addressManager) Run(stopChan <-chan struct{}, doneWg *sync.WaitGroup) {
+	if config.OvnKubeNode.Mode == types.NodeModeDPU {
+		return
+	}
+
 	c.addHandlerForPrimaryAddrChange()
 	doneWg.Add(1)
 	go func() {
@@ -402,14 +406,32 @@ func (c *addressManager) isValidNodeIP(addr net.IP, linkIndex int) bool {
 	if util.IsAddressReservedForInternalUse(addr) {
 		return false
 	}
-	if config.OVNKubernetesFeature.EnableEgressIP && !util.PlatformTypeIsEgressIPCloudProvider() {
-		// IPs assigned to host interfaces to support the egress IP multi NIC feature must be excluded.
-		eipAddresses, err := c.getSecondaryHostEgressIPs()
-		if err != nil {
-			klog.Errorf("Failed to get secondary host assigned Egress IPs and ensure they are excluded: %v", err)
+	if config.OVNKubernetesFeature.EnableEgressIP {
+		// EgressIPs assigned to the primary interface for pods whose primary network is a user defined network must be excluded.
+		if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && config.Gateway.Mode != config.GatewayModeDisabled {
+			// Two methods to look up EIPs assigned to the gateway bridge: a fast path from a shared cache, or a slow path from node annotations.
+			// At startup, the gateway bridge cache gets synced.
+			if c.gatewayBridge.eipMarkIPs != nil && c.gatewayBridge.eipMarkIPs.HasSyncdOnce() && c.gatewayBridge.eipMarkIPs.IsIPPresent(addr) {
+				return false
+			} else {
+				if eipAddresses, err := c.getPrimaryHostEgressIPs(); err != nil {
+					klog.Errorf("Failed to get primary host assigned Egress IPs and ensure they are excluded: %v", err)
+				} else {
+					if eipAddresses.Has(addr.String()) {
+						return false
+					}
+				}
+			}
 		}
-		if eipAddresses.Has(addr.String()) {
-			return false
+		if !util.PlatformTypeIsEgressIPCloudProvider() {
+			// IPs assigned to host interfaces to support the egress IP multi NIC feature must be excluded.
+			if eipAddresses, err := c.getSecondaryHostEgressIPs(); err != nil {
+				klog.Errorf("Failed to get secondary host assigned Egress IPs and ensure they are excluded: %v", err)
+			} else {
+				if eipAddresses.Has(addr.String()) {
+					return false
+				}
+			}
 		}
 	}
 
@@ -417,6 +439,10 @@ func (c *addressManager) isValidNodeIP(addr net.IP, linkIndex int) bool {
 }
 
 func (c *addressManager) sync() {
+	if config.OvnKubeNode.Mode == types.NodeModeDPU {
+		return
+	}
+
 	var addrs []netlink.Addr
 
 	if c.useNetlink {
@@ -475,6 +501,22 @@ func (c *addressManager) getSecondaryHostEgressIPs() (sets.Set[string], error) {
 	return eipAddrs, nil
 }
 
+func (c *addressManager) getPrimaryHostEgressIPs() (sets.Set[string], error) {
+	node, err := c.watchFactory.GetNode(c.nodeName)
+	if err != nil {
+		return nil, fmt.Errorf("unable to get Node from informer: %v", err)
+	}
+	eipAddrs, err := util.ParseNodeBridgeEgressIPsAnnotation(node)
+	if err != nil {
+		if util.IsAnnotationNotSetError(err) {
+			eipAddrs = make([]string, 0)
+		} else {
+			return nil, err
+		}
+	}
+	return sets.New[string](eipAddrs...), nil
+}
+
 // updateOVNEncapIPAndReconnect updates encap IP to OVS when the node primary IP changed.
func updateOVNEncapIPAndReconnect(newIP net.IP) { checkCmd := []string{ diff --git a/go-controller/pkg/node/node_ip_handler_linux_test.go b/go-controller/pkg/node/node_ip_handler_linux_test.go index 164fb5ecbc..5a8f1141f5 100644 --- a/go-controller/pkg/node/node_ip_handler_linux_test.go +++ b/go-controller/pkg/node/node_ip_handler_linux_test.go @@ -8,12 +8,13 @@ import ( "sync/atomic" "time" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -379,19 +380,20 @@ func configureKubeOVNContext(nodeName string, useNetlink bool) *testCtx { ifName: nodeName, link: nil, routerMAC: nil, + nft: nodenft.SetFakeNFTablesHelper(), ipv4: &managementPortIPFamilyConfig{ - ipt: nil, allSubnets: nil, ifAddr: tc.mgmtPortIP4, gwIP: tc.mgmtPortIP4.IP, }, ipv6: &managementPortIPFamilyConfig{ - ipt: nil, allSubnets: nil, ifAddr: tc.mgmtPortIP6, gwIP: tc.mgmtPortIP6.IP, }, } + err = setupManagementPortNFTables(fakeMgmtPortConfig) + Expect(err).NotTo(HaveOccurred()) fakeBridgeConfiguration := &bridgeConfiguration{bridgeName: "breth0"} diff --git a/go-controller/pkg/node/node_suite_test.go b/go-controller/pkg/node/node_suite_test.go index 374ebafb67..7cd840a480 100644 --- a/go-controller/pkg/node/node_suite_test.go +++ b/go-controller/pkg/node/node_suite_test.go @@ -3,7 +3,7 @@ package node import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/node/obj_retry_gateway.go b/go-controller/pkg/node/obj_retry_gateway.go index c1d6a4ac8c..9519d19eb6 100644 --- a/go-controller/pkg/node/obj_retry_gateway.go +++ b/go-controller/pkg/node/obj_retry_gateway.go @@ -9,6 +9,7 @@ import ( cache "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" + egressipv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" ) @@ -53,6 +54,15 @@ func (h *gwEventHandler) AreResourcesEqual(obj1, obj2 interface{}) (bool, error) // always run update code return false, nil + case factory.EgressIPType: + // we don't care about pkt mark annotation changing as we assume its value should not change and if it does, we don't + // want to react to that. 
+ oldEIP, newEIP := obj1.(*egressipv1.EgressIP), obj2.(*egressipv1.EgressIP) + if reflect.DeepEqual(oldEIP.Status.Items, newEIP.Status.Items) && reflect.DeepEqual(oldEIP.Spec.EgressIPs, newEIP.Spec.EgressIPs) { + return true, nil + } + return false, nil + default: return false, fmt.Errorf("no object comparison for type %s", h.objType) } @@ -79,6 +89,9 @@ func (h *gwEventHandler) GetResourceFromInformerCache(key string) (interface{}, case factory.ServiceForGatewayType: obj, err = h.g.watchFactory.GetService(namespace, name) + case factory.EgressIPType: + obj, err = h.g.watchFactory.GetEgressIP(name) + default: err = fmt.Errorf("object type %s not supported, cannot retrieve it from informers cache", h.objType) @@ -100,6 +113,11 @@ func (h *gwEventHandler) AddResource(obj interface{}, fromRetryLoop bool) error case factory.EndpointSliceForGatewayType: endpointSlice := obj.(*discovery.EndpointSlice) return h.g.AddEndpointSlice(endpointSlice) + + case factory.EgressIPType: + eip := obj.(*egressipv1.EgressIP) + return h.g.AddEgressIP(eip) + default: return fmt.Errorf("no add function for object type %s", h.objType) } @@ -121,6 +139,11 @@ func (h *gwEventHandler) UpdateResource(oldObj, newObj interface{}, inRetryCache newEndpointSlice := newObj.(*discovery.EndpointSlice) return h.g.UpdateEndpointSlice(oldEndpointSlice, newEndpointSlice) + case factory.EgressIPType: + oldEIP := oldObj.(*egressipv1.EgressIP) + newEIP := newObj.(*egressipv1.EgressIP) + return h.g.UpdateEgressIP(oldEIP, newEIP) + default: return fmt.Errorf("no update function for object type %s", h.objType) } @@ -139,6 +162,11 @@ func (h *gwEventHandler) DeleteResource(obj, cachedObj interface{}) error { case factory.EndpointSliceForGatewayType: endpointSlice := obj.(*discovery.EndpointSlice) return h.g.DeleteEndpointSlice(endpointSlice) + + case factory.EgressIPType: + eip := obj.(*egressipv1.EgressIP) + return h.g.DeleteEgressIP(eip) + default: return fmt.Errorf("no delete function for object type %s", h.objType) } @@ -160,6 +188,9 @@ func (h *gwEventHandler) SyncFunc(objs []interface{}) error { case factory.ServiceForGatewayType: syncFunc = h.g.SyncServices + case factory.EgressIPType: + syncFunc = h.g.SyncEgressIP + default: return fmt.Errorf("no sync function for object type %s", h.objType) } diff --git a/go-controller/pkg/node/openflow_manager.go b/go-controller/pkg/node/openflow_manager.go index cd700ec875..9094ebedc2 100644 --- a/go-controller/pkg/node/openflow_manager.go +++ b/go-controller/pkg/node/openflow_manager.go @@ -10,6 +10,7 @@ import ( "time" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/udn" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -39,10 +40,10 @@ func (c *openflowManager) getExGwBridgePortConfigurations() ([]bridgeUDNConfigur return c.externalGatewayBridge.getBridgePortConfigurations() } -func (c *openflowManager) addNetwork(nInfo util.NetInfo, masqCTMark uint, v4MasqIP, v6MasqIP *net.IPNet) { - c.defaultBridge.addNetworkBridgeConfig(nInfo, masqCTMark, v4MasqIP, v6MasqIP) +func (c *openflowManager) addNetwork(nInfo util.NetInfo, masqCTMark, pktMark uint, v6MasqIPs, v4MasqIPs *udn.MasqueradeIPs) { + c.defaultBridge.addNetworkBridgeConfig(nInfo, masqCTMark, pktMark, v6MasqIPs, v4MasqIPs) if c.externalGatewayBridge != nil { - c.externalGatewayBridge.addNetworkBridgeConfig(nInfo, masqCTMark, v4MasqIP, v6MasqIP) + 
c.externalGatewayBridge.addNetworkBridgeConfig(nInfo, masqCTMark, pktMark, v6MasqIPs, v4MasqIPs) } } @@ -53,6 +54,10 @@ func (c *openflowManager) delNetwork(nInfo util.NetInfo) { } } +func (c *openflowManager) getActiveNetwork(nInfo util.NetInfo) *bridgeUDNConfiguration { + return c.defaultBridge.getActiveNetworkBridgeConfig(nInfo.GetNetworkName()) +} + // END UDN UTILs func (c *openflowManager) getDefaultBridgeName() string { @@ -249,9 +254,13 @@ func checkPorts(netConfigs []bridgeUDNConfiguration, physIntf, ofPortPhys string } if netConfig.ofPortPatch != curOfportPatch { - klog.Errorf("Fatal error: patch port %s ofport changed from %s to %s", - netConfig.patchPort, netConfig.ofPortPatch, curOfportPatch) - os.Exit(1) + if netConfig.isDefaultNetwork() || curOfportPatch != "" { + klog.Errorf("Fatal error: patch port %s ofport changed from %s to %s", + netConfig.patchPort, netConfig.ofPortPatch, curOfportPatch) + os.Exit(1) + } else { + klog.Warningf("Patch port %s removed for existing network", netConfig.patchPort) + } } } diff --git a/go-controller/pkg/node/ovn_test.go b/go-controller/pkg/node/ovn_test.go index b171ed4f5a..e0212f6b4e 100644 --- a/go-controller/pkg/node/ovn_test.go +++ b/go-controller/pkg/node/ovn_test.go @@ -4,6 +4,7 @@ import ( "context" "sync" + nadfake "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/fake" . "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" adminpolicybasedrouteclient "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/fake" @@ -59,6 +60,7 @@ func (o *FakeOVNNode) start(ctx *cli.Context, objects ...runtime.Object) { KubeClient: fake.NewSimpleClientset(v1Objects...), EgressServiceClient: egressservicefake.NewSimpleClientset(egressServiceObjects...), AdminPolicyRouteClient: adminpolicybasedrouteclient.NewSimpleClientset(), + NetworkAttchDefClient: nadfake.NewSimpleClientset(), } o.init() // initializes the node } @@ -83,8 +85,9 @@ func (o *FakeOVNNode) init() { Expect(err).NotTo(HaveOccurred()) cnnci := NewCommonNodeNetworkControllerInfo(o.fakeClient.KubeClient, o.fakeClient.AdminPolicyRouteClient, o.watcher, o.recorder, fakeNodeName, routemanager.NewController()) - o.nc = newDefaultNodeNetworkController(cnnci, o.stopChan, o.wg, routemanager.NewController()) + o.nc = newDefaultNodeNetworkController(cnnci, o.stopChan, o.wg, routemanager.NewController(), nil) // watcher is started by nodeNetworkControllerManager, not by nodeNetworkcontroller, so start it here. o.watcher.Start() + o.nc.PreStart(context.TODO()) o.nc.Start(context.TODO()) } diff --git a/go-controller/pkg/node/port_claim_test.go b/go-controller/pkg/node/port_claim_test.go index 51c8680a18..c24d95ce8d 100644 --- a/go-controller/pkg/node/port_claim_test.go +++ b/go-controller/pkg/node/port_claim_test.go @@ -6,7 +6,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/urfave/cli/v2" kapi "k8s.io/api/core/v1" diff --git a/go-controller/pkg/node/routemanager/route_manager_suite_test.go b/go-controller/pkg/node/routemanager/route_manager_suite_test.go index dc3207f956..9ef504365b 100644 --- a/go-controller/pkg/node/routemanager/route_manager_suite_test.go +++ b/go-controller/pkg/node/routemanager/route_manager_suite_test.go @@ -3,7 +3,7 @@ package routemanager import ( "testing" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/node/routemanager/route_manager_test.go b/go-controller/pkg/node/routemanager/route_manager_test.go index c9d69138cb..57da31f295 100644 --- a/go-controller/pkg/node/routemanager/route_manager_test.go +++ b/go-controller/pkg/node/routemanager/route_manager_test.go @@ -2,17 +2,18 @@ package routemanager import ( "net" - "os" "runtime" "sync" "time" "github.com/containernetworking/plugins/pkg/ns" "github.com/containernetworking/plugins/pkg/testutils" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/vishvananda/netlink" utilsnet "k8s.io/utils/net" + + ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" ) var _ = ginkgo.Describe("Route Manager", func() { @@ -38,7 +39,7 @@ var _ = ginkgo.Describe("Route Manager", func() { loIPDiff := net.IPv4(127, 1, 1, 2) loGWIP := net.IPv4(127, 1, 1, 254) customTableID := 1005 - if os.Getenv("NOROOT") == "TRUE" { + if ovntest.NoRoot() { defer ginkgo.GinkgoRecover() ginkgo.Skip("Test requires root privileges") } diff --git a/go-controller/pkg/node/secondary_node_network_controller.go b/go-controller/pkg/node/secondary_node_network_controller.go index e3ebb4e9c9..f3bf50f472 100644 --- a/go-controller/pkg/node/secondary_node_network_controller.go +++ b/go-controller/pkg/node/secondary_node_network_controller.go @@ -102,15 +102,15 @@ func (nc *SecondaryNodeNetworkController) Cleanup() error { } func (oc *SecondaryNodeNetworkController) getNetworkID() (int, error) { - if oc.networkID == nil || *oc.networkID == util.InvalidNetworkID { - oc.networkID = ptr.To(util.InvalidNetworkID) + if oc.networkID == nil || *oc.networkID == util.InvalidID { + oc.networkID = ptr.To(util.InvalidID) nodes, err := oc.watchFactory.GetNodes() if err != nil { - return util.InvalidNetworkID, err + return util.InvalidID, err } *oc.networkID, err = util.GetNetworkID(nodes, oc.NetInfo) if err != nil { - return util.InvalidNetworkID, err + return util.InvalidID, err } } return *oc.networkID, nil diff --git a/go-controller/pkg/node/secondary_node_network_controller_test.go b/go-controller/pkg/node/secondary_node_network_controller_test.go index cf1b950576..914140e4fe 100644 --- a/go-controller/pkg/node/secondary_node_network_controller_test.go +++ b/go-controller/pkg/node/secondary_node_network_controller_test.go @@ -3,12 +3,13 @@ package node import ( "context" "fmt" + "net" "sync" "time" "github.com/containernetworking/plugins/pkg/ns" "github.com/containernetworking/plugins/pkg/testutils" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/stretchr/testify/mock" "github.com/vishvananda/netlink" @@ -35,22 +36,8 @@ var _ = Describe("SecondaryNodeNetworkController", func() { var ( nad = ovntest.GenerateNAD("bluenet", "rednad", "greenamespace", types.Layer3Topology, "100.128.0.0/16", types.NetworkRolePrimary) - netName = "bluenet" - netID = 3 - nodeName string = "worker1" - mgtPortMAC string = "00:00:00:55:66:77" - fexec *ovntest.FakeExec - testNS ns.NetNS - vrf *vrfmanager.Controller - ipRulesManager *iprulemanager.Controller - v4NodeSubnet = "10.128.0.0/24" - v6NodeSubnet = "ae70::66/112" - mgtPort = fmt.Sprintf("%s%d", types.K8sMgmtIntfNamePrefix, netID) - gatewayInterface = "eth0" - gatewayBridge = "breth0" - stopCh chan struct{} - wg *sync.WaitGroup - kubeMock kubemocks.Interface + fexec *ovntest.FakeExec + mgtPortMAC string = "00:00:00:55:66:77" // dummy MAC used for fake commands ) BeforeEach(func() { // Restore global default values before each testcase @@ -59,66 +46,11 @@ var _ = Describe("SecondaryNodeNetworkController", func() { config.Gateway.V6MasqueradeSubnet = "fd69::/112" config.Gateway.V4MasqueradeSubnet = "169.254.0.0/17" // Set up a fake vsctl command mock interface - kubeMock = kubemocks.Interface{} fexec = ovntest.NewFakeExec() - err := util.SetExec(fexec) - Expect(err).NotTo(HaveOccurred()) - // Set up a fake k8sMgmt interface - testNS, err = testutils.NewNS() - Expect(err).NotTo(HaveOccurred()) - err = testNS.Do(func(ns.NetNS) error { - defer GinkgoRecover() - ovntest.AddLink(gatewayInterface) - link := ovntest.AddLink(gatewayBridge) - ovntest.AddLink(mgtPort) - addr, _ := netlink.ParseAddr("169.254.169.2/29") - err = netlink.AddrAdd(link, addr) - if err != nil { - return err - } - addr, _ = netlink.ParseAddr("10.0.0.5/24") - err = netlink.AddrAdd(link, addr) - if err != nil { - return err - } - return nil - }) - Expect(err).NotTo(HaveOccurred()) - wg = &sync.WaitGroup{} - stopCh = make(chan struct{}) - routeManager := routemanager.NewController() - wg.Add(1) - go testNS.Do(func(netNS ns.NetNS) error { - defer wg.Done() - routeManager.Run(stopCh, 2*time.Minute) - return nil - }) - ipRulesManager = iprulemanager.NewController(true, true) - wg.Add(1) - go testNS.Do(func(netNS ns.NetNS) error { - defer wg.Done() - ipRulesManager.Run(stopCh, 4*time.Minute) - return nil - }) - vrf = vrfmanager.NewController(routeManager) - wg2 := &sync.WaitGroup{} - defer func() { - wg2.Wait() - }() - wg2.Add(1) - go testNS.Do(func(netNS ns.NetNS) error { - defer wg2.Done() - defer GinkgoRecover() - err = vrf.Run(stopCh, wg) - Expect(err).NotTo(HaveOccurred()) - return nil - }) + Expect(util.SetExec(fexec)).To(Succeed()) }) AfterEach(func() { - close(stopCh) - wg.Wait() - Expect(testNS.Close()).To(Succeed()) - Expect(testutils.UnmountNS(testNS)).To(Succeed()) + util.ResetRunner() }) It("should return networkID from one of the nodes in the cluster", func() { @@ -168,7 +100,7 @@ var _ = Describe("SecondaryNodeNetworkController", func() { networkID, err := controller.getNetworkID() Expect(err).To(HaveOccurred()) - Expect(networkID).To(Equal(util.InvalidNetworkID)) + Expect(networkID).To(Equal(util.InvalidID)) }) It("ensure UDNGateway is not invoked when feature gate is OFF", func() { config.OVNKubernetesFeature.EnableNetworkSegmentation = false @@ -218,6 +150,7 @@ var _ = Describe("SecondaryNodeNetworkController", func() { nodeInformer.On("Lister").Return(&nodeLister) NetInfo, err := util.ParseNADInfo(nad) Expect(err).NotTo(HaveOccurred()) + getCreationFakeCommands(fexec, "ovn-k8s-mp3", 
mgtPortMAC, NetInfo.GetNetworkName(), "worker1", NetInfo.MTU()) controller, err := NewSecondaryNodeNetworkController(&cnnci, NetInfo, nil, nil, &gateway{}) Expect(err).NotTo(HaveOccurred()) err = controller.Start(context.Background()) @@ -252,6 +185,99 @@ var _ = Describe("SecondaryNodeNetworkController", func() { Expect(err).NotTo(HaveOccurred()) Expect(controller.gateway).To(BeNil()) }) +}) + +var _ = Describe("SecondaryNodeNetworkController: UserDefinedPrimaryNetwork Gateway functionality", func() { + var ( + nad = ovntest.GenerateNAD("bluenet", "rednad", "greenamespace", + types.Layer3Topology, "100.128.0.0/16", types.NetworkRolePrimary) + netName = "bluenet" + netID = 3 + nodeName string = "worker1" + mgtPortMAC string = "00:00:00:55:66:77" + fexec *ovntest.FakeExec + testNS ns.NetNS + vrf *vrfmanager.Controller + ipRulesManager *iprulemanager.Controller + v4NodeSubnet = "10.128.0.0/24" + v6NodeSubnet = "ae70::66/112" + mgtPort = fmt.Sprintf("%s%d", types.K8sMgmtIntfNamePrefix, netID) + gatewayInterface = "eth0" + gatewayBridge = "breth0" + stopCh chan struct{} + wg *sync.WaitGroup + kubeMock kubemocks.Interface + ) + BeforeEach(func() { + // Restore global default values before each testcase + Expect(config.PrepareTestConfig()).To(Succeed()) + // Use a larger masq subnet to allow OF manager to allocate IPs for UDNs. + config.Gateway.V6MasqueradeSubnet = "fd69::/112" + config.Gateway.V4MasqueradeSubnet = "169.254.0.0/17" + // Set up a fake vsctl command mock interface + kubeMock = kubemocks.Interface{} + fexec = ovntest.NewFakeExec() + err := util.SetExec(fexec) + Expect(err).NotTo(HaveOccurred()) + // Set up a fake k8sMgmt interface + testNS, err = testutils.NewNS() + Expect(err).NotTo(HaveOccurred()) + err = testNS.Do(func(ns.NetNS) error { + defer GinkgoRecover() + ovntest.AddLink(gatewayInterface) + link := ovntest.AddLink(gatewayBridge) + ovntest.AddLink(mgtPort) + addr, _ := netlink.ParseAddr("169.254.169.2/29") + err = netlink.AddrAdd(link, addr) + if err != nil { + return err + } + addr, _ = netlink.ParseAddr("10.0.0.5/24") + err = netlink.AddrAdd(link, addr) + if err != nil { + return err + } + return nil + }) + Expect(err).NotTo(HaveOccurred()) + wg = &sync.WaitGroup{} + stopCh = make(chan struct{}) + routeManager := routemanager.NewController() + wg.Add(1) + go testNS.Do(func(netNS ns.NetNS) error { + defer wg.Done() + routeManager.Run(stopCh, 2*time.Minute) + return nil + }) + ipRulesManager = iprulemanager.NewController(true, true) + wg.Add(1) + go testNS.Do(func(netNS ns.NetNS) error { + defer wg.Done() + ipRulesManager.Run(stopCh, 4*time.Minute) + return nil + }) + vrf = vrfmanager.NewController(routeManager) + wg2 := &sync.WaitGroup{} + defer func() { + wg2.Wait() + }() + wg2.Add(1) + go testNS.Do(func(netNS ns.NetNS) error { + defer wg2.Done() + defer GinkgoRecover() + err = vrf.Run(stopCh, wg) + Expect(err).NotTo(HaveOccurred()) + return nil + }) + }) + AfterEach(func() { + close(stopCh) + wg.Wait() + Expect(testNS.Close()).To(Succeed()) + Expect(testutils.UnmountNS(testNS)).To(Succeed()) + util.ResetRunner() + }) + ovntest.OnSupportedPlatformsIt("ensure UDNGateway and VRFManager and IPRulesManager are invoked for Primary UDNs when feature gate is ON", func() { config.OVNKubernetesFeature.EnableNetworkSegmentation = true config.OVNKubernetesFeature.EnableMultiNetwork = true @@ -280,16 +306,15 @@ var _ = Describe("SecondaryNodeNetworkController", func() { nodeLister := v1mocks.NodeLister{} nodeInformer.On("Lister").Return(&nodeLister) nodeLister.On("Get", 
mock.AnythingOfType("string")).Return(node, nil) - cnode := node.DeepCopy() - cnode.Annotations[util.OvnNodeManagementPortMacAddresses] = `{"bluenet":"00:00:00:55:66:77"}` - kubeMock.On("UpdateNodeStatus", cnode).Return(nil) By("creating NAD for primary UDN") nad = ovntest.GenerateNAD("bluenet", "rednad", "greenamespace", types.Layer3Topology, "100.128.0.0/16", types.NetworkRolePrimary) NetInfo, err := util.ParseNADInfo(nad) Expect(err).NotTo(HaveOccurred()) - + _, ipNet, err := net.ParseCIDR(v4NodeSubnet) + Expect(err).NotTo(HaveOccurred()) + mgtPortMAC = util.IPAddrToHWAddr(util.GetNodeManagementIfAddr(ipNet).IP).String() By("creating secondary network controller for user defined primary network") cnnci := CommonNodeNetworkControllerInfo{name: nodeName, watchFactory: &factoryMock} controller, err := NewSecondaryNodeNetworkController(&cnnci, NetInfo, vrf, ipRulesManager, &gateway{}) @@ -300,8 +325,9 @@ var _ = Describe("SecondaryNodeNetworkController", func() { err = testNS.Do(func(ns.NetNS) error { defer GinkgoRecover() - getCreationFakeOVSCommands(fexec, mgtPort, mgtPortMAC, netName, nodeName, NetInfo.MTU()) + getCreationFakeCommands(fexec, mgtPort, mgtPortMAC, netName, nodeName, NetInfo.MTU()) getVRFCreationFakeOVSCommands(fexec) + getRPFilterLooseModeFakeCommands(fexec) getDeletionFakeOVSCommands(fexec, mgtPort) By("starting secondary network controller for user defined primary network") @@ -328,7 +354,7 @@ var _ = Describe("SecondaryNodeNetworkController", func() { return err }).WithTimeout(120 * time.Second).Should(BeNil()) - By("check masquerade iprules are created for the network") + By("check iprules are created for the network") rulesFound, err := netlink.RuleList(netlink.FAMILY_ALL) Expect(err).NotTo(HaveOccurred()) var udnRules []netlink.Rule @@ -337,10 +363,10 @@ var _ = Describe("SecondaryNodeNetworkController", func() { udnRules = append(udnRules, rule) } } - Expect(udnRules).To(HaveLen(2)) + Expect(udnRules).To(HaveLen(3)) By("delete the network and ensure its associated VRF device is also deleted") - cnode = node.DeepCopy() + cnode := node.DeepCopy() kubeMock.On("UpdateNodeStatus", cnode).Return(nil) err = controller.Cleanup() Expect(err).NotTo(HaveOccurred()) diff --git a/go-controller/pkg/node/udn_isolation.go b/go-controller/pkg/node/udn_isolation.go new file mode 100644 index 0000000000..a6817bd377 --- /dev/null +++ b/go-controller/pkg/node/udn_isolation.go @@ -0,0 +1,679 @@ +package node + +import ( + "context" + "errors" + "fmt" + "net" + "os" + "path/filepath" + "reflect" + "strings" + "time" + + "github.com/coreos/go-systemd/v22/dbus" + v1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + coreinformers "k8s.io/client-go/informers/core/v1" + corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + "sigs.k8s.io/knftables" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/controller" + nad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" + nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" +) + +const ( + // udn-isolation chain contains rules for udn isolation from the host side. 
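+	// Rule order in this chain (see addRules): per-port and ICMP accepts first, then
+	// an accept for traffic from kubelet's cgroup, and finally a drop for all other
+	// traffic destined to the default-network IPs of primary-UDN pods.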
+ UDNIsolationChain = "udn-isolation" + // nftables set names + nftablesUDNOpenPortsv4 = "udn-open-ports-v4" + nftablesUDNOpenPortsv6 = "udn-open-ports-v6" + nftablesUDNOpenPortsICMPv4 = "udn-open-ports-icmp-v4" + nftablesUDNOpenPortsICMPv6 = "udn-open-ports-icmp-v6" + nftablesUDNPodIPsv4 = "udn-pod-default-ips-v4" + nftablesUDNPodIPsv6 = "udn-pod-default-ips-v6" +) + +// UDNHostIsolationManager manages the host isolation for user defined networks. +// It uses nftables chain "udn-isolation" to only allow connection to primary UDN pods from kubelet. +// It also listens to systemd events to re-apply the rules after kubelet restart as cgroup matching is used. +type UDNHostIsolationManager struct { + nft knftables.Interface + ipv4, ipv6 bool + podController controller.Controller + podLister corelisters.PodLister + nadController *nad.NetAttachDefinitionController + kubeletCgroupPath string + + udnPodIPsv4 *nftPodElementsSet + udnPodIPsv6 *nftPodElementsSet + + udnOpenPortsv4 *nftPodElementsSet + udnOpenPortsv6 *nftPodElementsSet + + udnOpenPortsICMPv4 *nftPodElementsSet + udnOpenPortsICMPv6 *nftPodElementsSet +} + +func NewUDNHostIsolationManager(ipv4, ipv6 bool, podInformer coreinformers.PodInformer, + nadController *nad.NetAttachDefinitionController) *UDNHostIsolationManager { + m := &UDNHostIsolationManager{ + podLister: podInformer.Lister(), + nadController: nadController, + ipv4: ipv4, + ipv6: ipv6, + udnPodIPsv4: newNFTPodElementsSet(nftablesUDNPodIPsv4, false), + udnPodIPsv6: newNFTPodElementsSet(nftablesUDNPodIPsv6, false), + udnOpenPortsv4: newNFTPodElementsSet(nftablesUDNOpenPortsv4, true), + udnOpenPortsv6: newNFTPodElementsSet(nftablesUDNOpenPortsv6, true), + udnOpenPortsICMPv4: newNFTPodElementsSet(nftablesUDNOpenPortsICMPv4, false), + udnOpenPortsICMPv6: newNFTPodElementsSet(nftablesUDNOpenPortsICMPv6, false), + } + controllerConfig := &controller.ControllerConfig[v1.Pod]{ + RateLimiter: workqueue.NewTypedItemFastSlowRateLimiter[string](time.Second, 5*time.Second, 5), + Informer: podInformer.Informer(), + Lister: podInformer.Lister().List, + ObjNeedsUpdate: podNeedsUpdate, + Reconcile: m.reconcilePod, + Threadiness: 1, + } + m.podController = controller.NewController[v1.Pod]("udn-host-isolation-manager", controllerConfig) + return m +} + +// Start must be called on node setup. +func (m *UDNHostIsolationManager) Start(ctx context.Context) error { + // find kubelet cgroup path. + // kind cluster uses "kubelet.slice/kubelet.service", while OCP cluster uses "system.slice/kubelet.service". + // as long as ovn-k node is running as a privileged container, we can access the host cgroup directory. 
+ err := filepath.WalkDir("/sys/fs/cgroup", func(path string, d os.DirEntry, err error) error { + if err != nil { + return nil + } + if d.Name() == "kubelet.service" { + m.kubeletCgroupPath = strings.TrimPrefix(path, "/sys/fs/cgroup/") + klog.Infof("Found kubelet cgroup path: %s", m.kubeletCgroupPath) + return filepath.SkipAll + } + return nil + }) + if err != nil || m.kubeletCgroupPath == "" { + return fmt.Errorf("failed to find kubelet cgroup path: %w", err) + } + nft, err := nodenft.GetNFTablesHelper() + if err != nil { + return fmt.Errorf("failed getting nftables helper: %w", err) + } + + m.nft = nft + if err = m.setupUDNIsolationFromHost(); err != nil { + return fmt.Errorf("failed to setup UDN host isolation: %w", err) + } + if err = m.runKubeletRestartTracker(ctx); err != nil { + return fmt.Errorf("failed to run kubelet restart tracker: %w", err) + } + return controller.StartWithInitialSync(m.podInitialSync, m.podController) +} + +func (m *UDNHostIsolationManager) Stop() { + controller.Stop(m.podController) +} + +// CleanupUDNHostIsolation removes all nftables chains and sets created by UDNHostIsolationManager. +func CleanupUDNHostIsolation() error { + nft, err := nodenft.GetNFTablesHelper() + if err != nil { + return fmt.Errorf("failed getting nftables helper: %w", err) + } + tx := nft.NewTransaction() + safeDelete(tx, &knftables.Chain{ + Name: UDNIsolationChain, + }) + safeDelete(tx, &knftables.Set{ + Name: nftablesUDNPodIPsv4, + Type: "ipv4_addr", + }) + safeDelete(tx, &knftables.Set{ + Name: nftablesUDNPodIPsv6, + Type: "ipv6_addr", + }) + safeDelete(tx, &knftables.Set{ + Name: nftablesUDNOpenPortsv4, + Type: "ipv4_addr . inet_proto . inet_service", + }) + safeDelete(tx, &knftables.Set{ + Name: nftablesUDNOpenPortsv6, + Type: "ipv6_addr . inet_proto . inet_service", + }) + safeDelete(tx, &knftables.Set{ + Name: nftablesUDNOpenPortsICMPv4, + Type: "ipv4_addr", + }) + safeDelete(tx, &knftables.Set{ + Name: nftablesUDNOpenPortsICMPv6, + Type: "ipv6_addr", + }) + return nft.Run(context.TODO(), tx) +} + +func (m *UDNHostIsolationManager) setupUDNIsolationFromHost() error { + tx := m.nft.NewTransaction() + tx.Add(&knftables.Chain{ + Name: UDNIsolationChain, + Comment: knftables.PtrTo("Host isolation for user defined networks"), + Type: knftables.PtrTo(knftables.FilterType), + Hook: knftables.PtrTo(knftables.OutputHook), + Priority: knftables.PtrTo(knftables.FilterPriority), + }) + tx.Flush(&knftables.Chain{ + Name: UDNIsolationChain, + }) + tx.Add(&knftables.Set{ + Name: nftablesUDNOpenPortsv4, + Comment: knftables.PtrTo("default network open ports of pods in user defined networks (IPv4)"), + Type: "ipv4_addr . inet_proto . inet_service", + }) + tx.Add(&knftables.Set{ + Name: nftablesUDNOpenPortsv6, + Comment: knftables.PtrTo("default network open ports of pods in user defined networks (IPv6)"), + Type: "ipv6_addr . inet_proto . 
inet_service", + }) + tx.Add(&knftables.Set{ + Name: nftablesUDNOpenPortsICMPv4, + Comment: knftables.PtrTo("default network IPs of pods in user defined networks that allow ICMP (IPv4)"), + Type: "ipv4_addr", + }) + tx.Add(&knftables.Set{ + Name: nftablesUDNOpenPortsICMPv6, + Comment: knftables.PtrTo("default network IPs of pods in user defined networks that allow ICMP (IPv6)"), + Type: "ipv6_addr", + }) + tx.Add(&knftables.Set{ + Name: nftablesUDNPodIPsv4, + Comment: knftables.PtrTo("default network IPs of pods in user defined networks (IPv4)"), + Type: "ipv4_addr", + }) + tx.Add(&knftables.Set{ + Name: nftablesUDNPodIPsv6, + Comment: knftables.PtrTo("default network IPs of pods in user defined networks (IPv6)"), + Type: "ipv6_addr", + }) + m.addRules(tx) + + err := m.nft.Run(context.TODO(), tx) + if err != nil { + return fmt.Errorf("could not setup nftables rules for UDN from host isolation: %v", err) + } + return nil +} + +func (m *UDNHostIsolationManager) addRules(tx *knftables.Transaction) { + if m.ipv4 { + tx.Add(&knftables.Rule{ + Chain: UDNIsolationChain, + Rule: knftables.Concat( + "ip", "daddr", ".", "meta l4proto", ".", "th dport", + "@", nftablesUDNOpenPortsv4, "accept", + ), + }) + tx.Add(&knftables.Rule{ + Chain: UDNIsolationChain, + Rule: knftables.Concat( + "ip", "daddr", "@", nftablesUDNOpenPortsICMPv4, "meta l4proto", "icmp", + "accept", + ), + }) + + tx.Add(&knftables.Rule{ + Chain: UDNIsolationChain, + Rule: knftables.Concat( + "socket", "cgroupv2", "level 2", m.kubeletCgroupPath, + "ip", "daddr", "@", nftablesUDNPodIPsv4, "accept"), + }) + tx.Add(&knftables.Rule{ + Chain: UDNIsolationChain, + Rule: knftables.Concat( + "ip", "daddr", "@", nftablesUDNPodIPsv4, "drop"), + }) + } + if m.ipv6 { + tx.Add(&knftables.Rule{ + Chain: UDNIsolationChain, + Rule: knftables.Concat( + "ip6", "daddr", ".", "meta l4proto", ".", "th dport", + "@", nftablesUDNOpenPortsv6, "accept", + ), + }) + tx.Add(&knftables.Rule{ + Chain: UDNIsolationChain, + Rule: knftables.Concat( + "ip6", "daddr", "@", nftablesUDNOpenPortsICMPv6, "meta l4proto", "icmpv6", + "accept", + ), + }) + tx.Add(&knftables.Rule{ + Chain: UDNIsolationChain, + Rule: knftables.Concat( + "socket", "cgroupv2", "level 2", m.kubeletCgroupPath, + "ip6", "daddr", "@", nftablesUDNPodIPsv6, "accept"), + }) + tx.Add(&knftables.Rule{ + Chain: UDNIsolationChain, + Rule: knftables.Concat( + "ip6", "daddr", "@", nftablesUDNPodIPsv6, "drop"), + }) + } +} + +func (m *UDNHostIsolationManager) updateKubeletCgroup() error { + tx := m.nft.NewTransaction() + tx.Flush(&knftables.Chain{ + Name: UDNIsolationChain, + }) + m.addRules(tx) + + err := m.nft.Run(context.TODO(), tx) + if err != nil { + return fmt.Errorf("could not update nftables rule for management port: %v", err) + } + return nil +} + +// runKubeletRestartTracker listens to systemd events to re-apply the UDN host isolation rules after kubelet restart. +// cgroupv2 match doesn't actually match cgroup paths, but rather resolves them to numeric cgroup IDs when such +// rules are loaded into kernel, and does not automatically update them in any way afterwards. +// From the patch https://patchwork.ozlabs.org/project/netfilter-devel/patch/1479114761-19534-1-git-send-email-pablo@netfilter.org/#1511797: +// If the cgroup is gone, the filtering policy would not match anymore. You only have to subscribe to events +// and perform an incremental updates to tear down the side of the filtering policy that you don't need anymore. 
+// If a new cgroup is created, you load the filtering policy for the new cgroup and then add
+// processes to that cgroup. You only have to follow the right sequence to avoid problems.
+func (m *UDNHostIsolationManager) runKubeletRestartTracker(ctx context.Context) (err error) {
+	conn, err := dbus.NewSystemdConnectionContext(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to connect to systemd: %w", err)
+	}
+	defer func() {
+		if err != nil {
+			conn.Close()
+		}
+	}()
+
+	err = conn.Subscribe()
+	if err != nil {
+		return fmt.Errorf("failed to subscribe to systemd events: %w", err)
+	}
+	// the interval is important here, as we need to catch the restart state before the unit is running again
+	events, errChan := conn.SubscribeUnitsCustom(50*time.Millisecond, 0, func(u1, u2 *dbus.UnitStatus) bool { return *u1 != *u2 },
+		func(s string) bool {
+			return s != "kubelet.service"
+		})
+	// run until context is cancelled
+	go func() {
+		waitingForActive := false
+		for {
+			select {
+			case <-ctx.Done():
+				conn.Close()
+				return
+			case event := <-events:
+				for _, status := range event {
+					if status.ActiveState != "active" {
+						waitingForActive = true
+					} else if waitingForActive {
+						klog.Infof("Kubelet was restarted, re-applying UDN host isolation")
+						err = m.updateKubeletCgroup()
+						if err != nil {
+							klog.Errorf("Failed to re-apply UDN host isolation: %v", err)
+						} else {
+							waitingForActive = false
+						}
+					}
+				}
+			case err := <-errChan:
+				klog.Errorf("Systemd listener error: %v", err)
+			}
+		}
+	}()
+	return nil
+}
+
+func (m *UDNHostIsolationManager) podInitialSync() error {
+	udnPodIPsv4 := map[string]sets.Set[string]{}
+	udnPodIPsv6 := map[string]sets.Set[string]{}
+	udnOpenPortsICMPv4 := map[string]sets.Set[string]{}
+	udnOpenPortsICMPv6 := map[string]sets.Set[string]{}
+	udnOpenPortsv4 := map[string]sets.Set[string]{}
+	udnOpenPortsv6 := map[string]sets.Set[string]{}
+
+	pods, err := m.podLister.List(labels.Everything())
+	if err != nil {
+		return fmt.Errorf("failed to list pods: %v", err)
+	}
+
+	for _, pod := range pods {
+		podKey, err := cache.MetaNamespaceKeyFunc(pod)
+		if err != nil {
+			klog.Warningf("UDNHostIsolationManager failed to get key for pod %s in namespace %s: %v", pod.Name, pod.Namespace, err)
+			continue
+		}
+		// ignore openPorts parse errors in the initial sync
+		pi, _, err := m.getPodInfo(podKey, pod)
+		if err != nil {
+			return err
+		}
+		if pi == nil {
+			// this pod doesn't need to be updated
+			continue
+		}
+
+		udnPodIPsv4[podKey] = pi.ipsv4
+		udnPodIPsv6[podKey] = pi.ipsv6
+		udnOpenPortsICMPv4[podKey] = pi.icmpv4
+		udnOpenPortsICMPv6[podKey] = pi.icmpv6
+		udnOpenPortsv4[podKey] = pi.openPortsv4
+		udnOpenPortsv6[podKey] = pi.openPortsv6
+	}
+	if err = m.udnPodIPsv4.fullSync(m.nft, udnPodIPsv4); err != nil {
+		return err
+	}
+	if err = m.udnPodIPsv6.fullSync(m.nft, udnPodIPsv6); err != nil {
+		return err
+	}
+	if err = m.udnOpenPortsICMPv4.fullSync(m.nft, udnOpenPortsICMPv4); err != nil {
+		return err
+	}
+	if err = m.udnOpenPortsICMPv6.fullSync(m.nft, udnOpenPortsICMPv6); err != nil {
+		return err
+	}
+	if err = m.udnOpenPortsv4.fullSync(m.nft, udnOpenPortsv4); err != nil {
+		return err
+	}
+	if err = m.udnOpenPortsv6.fullSync(m.nft, udnOpenPortsv6); err != nil {
+		return err
+	}
+	return nil
+}
+
+func podNeedsUpdate(oldObj, newObj *v1.Pod) bool {
+	if oldObj == nil || newObj == nil {
+		return true
+	}
+	// react to pod IP changes
+	return !reflect.DeepEqual(oldObj.Status, newObj.Status) ||
+		oldObj.Annotations[util.OvnPodAnnotationName] != newObj.Annotations[util.OvnPodAnnotationName]
+}
+
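
To make the reconcile trigger concrete, here is a small illustrative sketch (not part of this PR; the pod and annotation values are hypothetical):

```go
// Sketch: podNeedsUpdate fires only on status or OVN pod-annotation changes.
old := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
	Name:        "pod-a",
	Annotations: map[string]string{util.OvnPodAnnotationName: `{"default":{}}`}, // hypothetical value
}}

changed := old.DeepCopy()
changed.Status.PodIP = "10.244.1.5"       // a status change
fmt.Println(podNeedsUpdate(old, changed)) // true: Status differs

relabeled := old.DeepCopy()
relabeled.Labels = map[string]string{"app": "web"}
fmt.Println(podNeedsUpdate(old, relabeled)) // false: labels are not relevant here
```
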
+func (m *UDNHostIsolationManager) reconcilePod(key string) error {
+	namespace, name, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		klog.Errorf("UDNHostIsolationManager failed to split meta namespace cache key %s for pod: %v", key, err)
+		return nil
+	}
+	pod, err := m.podLister.Pods(namespace).Get(name)
+	if err != nil {
+		if kerrors.IsNotFound(err) {
+			// Pod was deleted, clean up.
+			return m.updateWithPodInfo(key, &podInfo{})
+		}
+		return fmt.Errorf("failed to fetch pod %s in namespace %s: %w", name, namespace, err)
+	}
+	pi, parseErr, err := m.getPodInfo(key, pod)
+	if err != nil {
+		return err
+	}
+	if pi == nil {
+		// this pod doesn't need to be updated
+		return nil
+	}
+	err = m.updateWithPodInfo(key, pi)
+	return errors.Join(err, parseErr)
+}
+
+type podInfo struct {
+	ipsv4       sets.Set[string]
+	ipsv6       sets.Set[string]
+	icmpv4      sets.Set[string]
+	icmpv6      sets.Set[string]
+	openPortsv4 sets.Set[string]
+	openPortsv6 sets.Set[string]
+}
+
+// getPodInfo returns nftables set elements for a pod.
+// nil is returned when the pod should not be updated.
+// An empty podInfo will delete the pod from all sets and is returned when a nil pod is passed.
+// The first error is for parsing the openPorts annotation, the second error is for fetching pod IPs.
+// A parse error should not stop the update, as we need to clean up rules potentially left over from the previous config.
+func (m *UDNHostIsolationManager) getPodInfo(podKey string, pod *v1.Pod) (*podInfo, error, error) {
+	pi := &podInfo{}
+	if pod == nil {
+		return pi, nil, nil
+	}
+	// only add pods with a primary UDN
+	primaryUDN, err := m.isPodPrimaryUDN(pod)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to check if pod %s is in primary UDN: %w", podKey, err)
+	}
+	if !primaryUDN {
+		return nil, nil, nil
+	}
+	podIPs, err := util.DefaultNetworkPodIPs(pod)
+	if err != nil {
+		// update event should come later with ips
+		klog.V(5).Infof("Failed to get default network pod IPs for pod %s: %v", podKey, err)
+		return nil, nil, nil
+	}
+	openPorts, parseErr := util.UnmarshalUDNOpenPortsAnnotation(pod.Annotations)
+	pi.ipsv4, pi.ipsv6 = splitIPsPerFamily(podIPs)
+	pi.icmpv4, pi.icmpv6, pi.openPortsv4, pi.openPortsv6 = m.getOpenPortSets(pi.ipsv4, pi.ipsv6, openPorts)
+	return pi, parseErr, nil
+}
+
+// updateWithPodInfo updates the nftables sets with the given podInfo for a given pod.
+// An empty podInfo will delete the pod from all sets.
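+// For example, reconcilePod passes an empty podInfo when the pod no longer exists,
+// which removes every element owned by that pod from all six sets.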
+func (m *UDNHostIsolationManager) updateWithPodInfo(podKey string, pi *podInfo) error { + tx := m.nft.NewTransaction() + m.udnPodIPsv4.updatePodElementsTX(podKey, pi.ipsv4, tx) + m.udnPodIPsv6.updatePodElementsTX(podKey, pi.ipsv6, tx) + m.udnOpenPortsICMPv4.updatePodElementsTX(podKey, pi.icmpv4, tx) + m.udnOpenPortsICMPv6.updatePodElementsTX(podKey, pi.icmpv6, tx) + m.udnOpenPortsv4.updatePodElementsTX(podKey, pi.openPortsv4, tx) + m.udnOpenPortsv6.updatePodElementsTX(podKey, pi.openPortsv6, tx) + + if tx.NumOperations() == 0 { + return nil + } + + err := m.nft.Run(context.TODO(), tx) + if err != nil { + return fmt.Errorf("could not update nftables set for UDN pods: %v", err) + } + + // update internal state only after successful transaction + m.udnPodIPsv4.updatePodElementsAfterTX(podKey, pi.ipsv4) + m.udnPodIPsv6.updatePodElementsAfterTX(podKey, pi.ipsv6) + m.udnOpenPortsICMPv4.updatePodElementsAfterTX(podKey, pi.icmpv4) + m.udnOpenPortsICMPv6.updatePodElementsAfterTX(podKey, pi.icmpv6) + m.udnOpenPortsv4.updatePodElementsAfterTX(podKey, pi.openPortsv4) + m.udnOpenPortsv6.updatePodElementsAfterTX(podKey, pi.openPortsv6) + return nil +} + +func (m *UDNHostIsolationManager) isPodPrimaryUDN(pod *v1.Pod) (bool, error) { + podAnnotation, err := util.UnmarshalPodAnnotation(pod.Annotations, types.DefaultNetworkName) + if err != nil { + // pod IPs were not assigned yet, should be retried later + return false, err + } + // NetworkRoleInfrastructure means default network is not primary, then UDN must be the primary network + return podAnnotation.Role == types.NetworkRoleInfrastructure, nil +} + +func (m *UDNHostIsolationManager) getOpenPortSets(newV4IPs, newV6IPs sets.Set[string], openPorts []*util.OpenPort) (icmpv4, icmpv6, openPortsv4, openPortsv6 sets.Set[string]) { + icmpv4 = sets.New[string]() + icmpv6 = sets.New[string]() + openPortsv4 = sets.New[string]() + openPortsv6 = sets.New[string]() + + for _, openPort := range openPorts { + if openPort.Protocol == "icmp" { + icmpv4 = newV4IPs + icmpv6 = newV6IPs + } else { + for podIPv4 := range newV4IPs { + openPortsv4.Insert(joinNFTSlice([]string{podIPv4, openPort.Protocol, fmt.Sprintf("%d", *openPort.Port)})) + } + for podIPv6 := range newV6IPs { + openPortsv6.Insert(joinNFTSlice([]string{podIPv6, openPort.Protocol, fmt.Sprintf("%d", *openPort.Port)})) + } + } + } + return +} + +// nftPodElementsSet is a helper struct to manage an nftables set with pod-owned elements. +// Can be used to store pod IPs, or more complex elements. +type nftPodElementsSet struct { + setName string + // podName: set elements + podElements map[string]sets.Set[string] + // podIPs may be reused as soon as the pod reaches Terminating state, and delete event may come later. + // That means a new pod with the same IP may be added before the previous pod is deleted. + // To avoid deleting newly-added pod IP thinking we are deleting old pod IP, we keep track of re-used set elements. 
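+	// e.g. if a Terminating pod's IP is reassigned to a new pod before the old pod's
+	// delete event arrives, the element must stay in the set until no pod references it.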
+	elementToPods map[string]sets.Set[string]
+	// if a set element is composed of multiple strings
+	// set to false to avoid unneeded parsing
+	composedValue bool
+}
+
+func newNFTPodElementsSet(setName string, composedValue bool) *nftPodElementsSet {
+	return &nftPodElementsSet{
+		setName:       setName,
+		composedValue: composedValue,
+		podElements:   make(map[string]sets.Set[string]),
+		elementToPods: make(map[string]sets.Set[string]),
+	}
+}
+
+func (n *nftPodElementsSet) getKey(key string) []string {
+	if n.composedValue {
+		return splitNFTSlice(key)
+	}
+	return []string{key}
+}
+
+// updatePodElementsTX adds transaction operations to update pod elements in the nftables set.
+// To update the internal struct, updatePodElementsAfterTX must be called if the transaction is successful.
+func (n *nftPodElementsSet) updatePodElementsTX(namespacedName string, podElements sets.Set[string], tx *knftables.Transaction) {
+	if n.podElements[namespacedName].Equal(podElements) {
+		return
+	}
+	// always delete all old elements, then add new elements.
+	for existingElem := range n.podElements[namespacedName] {
+		if n.elementToPods[existingElem].Len() == 1 {
+			// only delete the element if this pod is its sole owner
+			tx.Delete(&knftables.Element{
+				Set: n.setName,
+				Key: n.getKey(existingElem),
+			})
+		}
+	}
+	for newElem := range podElements {
+		// adding an existing element is a no-op
+		tx.Add(&knftables.Element{
+			Set: n.setName,
+			Key: n.getKey(newElem),
+		})
+	}
+}
+
+func (n *nftPodElementsSet) updatePodElementsAfterTX(namespacedName string, elements sets.Set[string]) {
+	for existingElem := range n.podElements[namespacedName] {
+		if !elements.Has(existingElem) {
+			// element was removed
+			n.elementToPods[existingElem].Delete(namespacedName)
+			if n.elementToPods[existingElem].Len() == 0 {
+				delete(n.elementToPods, existingElem)
+			}
+		}
+	}
+
+	for elem := range elements {
+		if n.elementToPods[elem] == nil {
+			n.elementToPods[elem] = sets.New[string]()
+		}
+		n.elementToPods[elem].Insert(namespacedName)
+	}
+	if len(elements) == 0 {
+		delete(n.podElements, namespacedName)
+	} else {
+		n.podElements[namespacedName] = elements
+	}
+}
+
+// fullSync should be called on restart to sync all pods' elements.
+// It flushes existing elements, and adds new elements.
+func (n *nftPodElementsSet) fullSync(nft knftables.Interface, podsElements map[string]sets.Set[string]) error {
+	tx := nft.NewTransaction()
+	tx.Flush(&knftables.Set{
+		Name: n.setName,
+	})
+	for podName, podElements := range podsElements {
+		if len(podElements) == 0 {
+			continue
+		}
+		for elem := range podElements {
+			tx.Add(&knftables.Element{
+				Set: n.setName,
+				Key: n.getKey(elem),
+			})
+			if n.elementToPods[elem] == nil {
+				n.elementToPods[elem] = sets.New[string]()
+			}
+			n.elementToPods[elem].Insert(podName)
+		}
+		n.podElements[podName] = podElements
+	}
+	err := nft.Run(context.TODO(), tx)
+	if err != nil {
+		clear(n.podElements)
+		return fmt.Errorf("initial pods sync for UDN host isolation failed: %w", err)
+	}
+	return nil
+}
+
+func splitIPsPerFamily(podIPs []net.IP) (sets.Set[string], sets.Set[string]) {
+	newV4IPs := sets.New[string]()
+	newV6IPs := sets.New[string]()
+	for _, podIP := range podIPs {
+		if podIP.To4() != nil {
+			newV4IPs.Insert(podIP.String())
+		} else {
+			newV6IPs.Insert(podIP.String())
+		}
+	}
+	return newV4IPs, newV6IPs
+}
+
+func safeDelete(tx *knftables.Transaction, obj knftables.Object) {
+	tx.Add(obj)
+	tx.Delete(obj)
+}
+
+// joinNFTSlice converts an nft element key or value (type []string) to a string to store in the nftElementStorage.
+// joinNFTSlice converts an nft element key or value (type []string) to a string for storage in an nftPodElementsSet. +// The separator is the same as the one used by nft commands, so we know that the parsing is going to be unambiguous. +func joinNFTSlice(k []string) string { + return strings.Join(k, " . ") +} + +// splitNFTSlice converts the string representation produced by joinNFTSlice back to a slice. +func splitNFTSlice(k string) []string { + return strings.Split(k, " . ") +} diff --git a/go-controller/pkg/node/udn_isolation_test.go b/go-controller/pkg/node/udn_isolation_test.go new file mode 100644 index 0000000000..35daee2ae9 --- /dev/null +++ b/go-controller/pkg/node/udn_isolation_test.go @@ -0,0 +1,506 @@ +package node + +import ( + "context" + "fmt" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "net" + "sigs.k8s.io/yaml" + "strings" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ktypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/knftables" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/controller" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + networkAttachDefController "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" + nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/nad" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" +) + +var _ = Describe("nftPodElementsSet", func() { + const setName = "test-set" + var nft *knftables.Fake + + for _, composed := range []bool{false, true} { + Context(fmt.Sprintf("composed=%v", composed), func() { + composed := composed + setType := "ipv4_addr" + if composed { + setType = "ipv4_addr . inet_proto . inet_service" + } + + BeforeEach(func() { + nft = nodenft.SetFakeNFTablesHelper() + tx := nft.NewTransaction() + tx.Add(&knftables.Set{ + Name: setName, + Type: setType, + }) + Expect(nft.Run(context.Background(), tx)).To(Succeed()) + }) + + getElem := func(ip string) string { + if !composed { + return ip + } + return fmt.Sprintf("%s . tcp . 
8080", ip) + } + + getElemKey := func(ip string) []string { + if !composed { + return []string{ip} + } + return []string{ip, "tcp", "8080"} + } + + getExpectedDump := func(ips ...string) string { + result := fmt.Sprintf(`add table inet ovn-kubernetes +add set inet ovn-kubernetes %s { type %s ; } +`, setName, setType) + for _, ip := range ips { + result += fmt.Sprintf("add element inet ovn-kubernetes %s { %s }\n", setName, getElem(ip)) + } + return result + } + + It("fullSync should update sets and build local cache", func() { + tx := nft.NewTransaction() + tx.Add(&knftables.Element{ + Set: setName, + Key: getElemKey("1.1.1.1"), + }) + tx.Add(&knftables.Element{ + Set: setName, + Key: getElemKey("1.1.1.2"), + }) + Expect(nft.Run(context.Background(), tx)).To(Succeed()) + + s := newNFTPodElementsSet(setName, composed) + newElems := map[string]sets.Set[string]{} + newElems["ns1/pod1"] = sets.New(getElem("1.1.1.2")) + newElems["ns1/pod2"] = sets.New(getElem("1.1.1.2")) + newElems["ns2/pod1"] = sets.New(getElem("1.1.1.3")) + Expect(s.fullSync(nft, newElems)).To(Succeed()) + Expect(nodenft.MatchNFTRules(getExpectedDump("1.1.1.2", "1.1.1.3"), nft.Dump())).To(Succeed()) + + Expect(s.podElements).To(Equal(newElems)) + Expect(s.elementToPods).To(Equal(map[string]sets.Set[string]{ + getElem("1.1.1.2"): sets.New("ns1/pod1", "ns1/pod2"), + getElem("1.1.1.3"): sets.New("ns2/pod1"), + })) + }) + + It("updatePodElements should update sets and local cache on pod add", func() { + s := newNFTPodElementsSet(setName, false) + tx := nft.NewTransaction() + + s.updatePodElementsTX("ns1/pod1", sets.New(getElem("1.1.1.1")), tx) + Expect(nft.Run(context.Background(), tx)).To(Succeed()) + s.updatePodElementsAfterTX("ns1/pod1", sets.New(getElem("1.1.1.1"))) + + Expect(nodenft.MatchNFTRules(getExpectedDump("1.1.1.1"), nft.Dump())).To(Succeed()) + Expect(s.podElements).To(Equal(map[string]sets.Set[string]{ + "ns1/pod1": sets.New(getElem("1.1.1.1")), + })) + Expect(s.elementToPods).To(Equal(map[string]sets.Set[string]{ + getElem("1.1.1.1"): sets.New("ns1/pod1"), + })) + + s.updatePodElementsTX("ns2/pod1", sets.New(getElem("1.1.1.1")), tx) + Expect(nft.Run(context.Background(), tx)).To(Succeed()) + s.updatePodElementsAfterTX("ns2/pod1", sets.New(getElem("1.1.1.1"))) + + Expect(nodenft.MatchNFTRules(getExpectedDump("1.1.1.1"), nft.Dump())).To(Succeed()) + Expect(s.podElements).To(Equal(map[string]sets.Set[string]{ + "ns1/pod1": sets.New(getElem("1.1.1.1")), + "ns2/pod1": sets.New(getElem("1.1.1.1")), + })) + Expect(s.elementToPods).To(Equal(map[string]sets.Set[string]{ + getElem("1.1.1.1"): sets.New("ns1/pod1", "ns2/pod1"), + })) + }) + + It("updatePodElements should update sets and local cache on pod update", func() { + s := newNFTPodElementsSet(setName, false) + tx := nft.NewTransaction() + //setup existing pod IPs + newElems := map[string]sets.Set[string]{} + newElems["ns1/pod1"] = sets.New(getElem("1.1.1.2")) + newElems["ns1/pod2"] = sets.New(getElem("1.1.1.2")) + newElems["ns2/pod1"] = sets.New(getElem("1.1.1.3")) + Expect(s.fullSync(nft, newElems)).To(Succeed()) + + s.updatePodElementsTX("ns1/pod1", sets.New(getElem("1.1.1.1")), tx) + Expect(nft.Run(context.Background(), tx)).To(Succeed()) + s.updatePodElementsAfterTX("ns1/pod1", sets.New(getElem("1.1.1.1"))) + + Expect(nodenft.MatchNFTRules(getExpectedDump("1.1.1.1", "1.1.1.2", "1.1.1.3"), nft.Dump())).To(Succeed()) + Expect(s.podElements).To(Equal(map[string]sets.Set[string]{ + "ns1/pod1": sets.New(getElem("1.1.1.1")), + "ns1/pod2": sets.New(getElem("1.1.1.2")), + 
"ns2/pod1": sets.New(getElem("1.1.1.3")), + })) + Expect(s.elementToPods).To(Equal(map[string]sets.Set[string]{ + getElem("1.1.1.1"): sets.New("ns1/pod1"), + getElem("1.1.1.2"): sets.New("ns1/pod2"), + getElem("1.1.1.3"): sets.New("ns2/pod1"), + })) + + s.updatePodElementsTX("ns2/pod1", sets.New(getElem("1.1.1.4")), tx) + Expect(nft.Run(context.Background(), tx)).To(Succeed()) + s.updatePodElementsAfterTX("ns2/pod1", sets.New(getElem("1.1.1.4"))) + Expect(nodenft.MatchNFTRules(getExpectedDump("1.1.1.1", "1.1.1.2", "1.1.1.4"), nft.Dump())).To(Succeed()) + Expect(s.podElements).To(Equal(map[string]sets.Set[string]{ + "ns1/pod1": sets.New(getElem("1.1.1.1")), + "ns1/pod2": sets.New(getElem("1.1.1.2")), + "ns2/pod1": sets.New(getElem("1.1.1.4")), + })) + Expect(s.elementToPods).To(Equal(map[string]sets.Set[string]{ + getElem("1.1.1.1"): sets.New("ns1/pod1"), + getElem("1.1.1.2"): sets.New("ns1/pod2"), + getElem("1.1.1.4"): sets.New("ns2/pod1"), + })) + }) + + It("updatePodElements should update sets and local cache on pod delete", func() { + s := newNFTPodElementsSet(setName, false) + tx := nft.NewTransaction() + //setup existing pod IPs + newElems := map[string]sets.Set[string]{} + newElems["ns1/pod1"] = sets.New(getElem("1.1.1.2")) + newElems["ns1/pod2"] = sets.New(getElem("1.1.1.2")) + newElems["ns2/pod1"] = sets.New(getElem("1.1.1.3")) + Expect(s.fullSync(nft, newElems)).To(Succeed()) + + s.updatePodElementsTX("ns1/pod1", sets.New[string](), tx) + Expect(nft.Run(context.Background(), tx)).To(Succeed()) + s.updatePodElementsAfterTX("ns1/pod1", sets.New[string]()) + + Expect(nodenft.MatchNFTRules(getExpectedDump("1.1.1.2", "1.1.1.3"), nft.Dump())).To(Succeed()) + Expect(s.podElements).To(Equal(map[string]sets.Set[string]{ + "ns1/pod2": sets.New(getElem("1.1.1.2")), + "ns2/pod1": sets.New(getElem("1.1.1.3")), + })) + Expect(s.elementToPods).To(Equal(map[string]sets.Set[string]{ + getElem("1.1.1.2"): sets.New("ns1/pod2"), + getElem("1.1.1.3"): sets.New("ns2/pod1"), + })) + + s.updatePodElementsTX("ns2/pod1", sets.New[string](), tx) + Expect(nft.Run(context.Background(), tx)).To(Succeed()) + s.updatePodElementsAfterTX("ns2/pod1", sets.New[string]()) + Expect(nodenft.MatchNFTRules(getExpectedDump("1.1.1.2"), nft.Dump())).To(Succeed()) + Expect(s.podElements).To(Equal(map[string]sets.Set[string]{ + "ns1/pod2": sets.New(getElem("1.1.1.2")), + })) + Expect(s.elementToPods).To(Equal(map[string]sets.Set[string]{ + getElem("1.1.1.2"): sets.New("ns1/pod2"), + })) + }) + }) + } +}) + +var _ = Describe("UDN Host isolation", func() { + var ( + manager *UDNHostIsolationManager + nadController *networkAttachDefController.NetAttachDefinitionController + wf *factory.WatchFactory + fakeClient *util.OVNNodeClientset + nft *knftables.Fake + ) + + const ( + nadNamespace = "nad-namespace" + defaultNamespace = "default-namespace" + ) + + getExpectedDump := func(v4ips, v6ips []string) string { + result := + `add table inet ovn-kubernetes +add chain inet ovn-kubernetes udn-isolation { type filter hook output priority 0 ; comment "Host isolation for user defined networks" ; } +add set inet ovn-kubernetes udn-open-ports-icmp-v4 { type ipv4_addr ; comment "default network IPs of pods in user defined networks that allow ICMP (IPv4)" ; } +add set inet ovn-kubernetes udn-open-ports-icmp-v6 { type ipv6_addr ; comment "default network IPs of pods in user defined networks that allow ICMP (IPv6)" ; } +add set inet ovn-kubernetes udn-open-ports-v4 { type ipv4_addr . inet_proto . 
inet_service ; comment "default network open ports of pods in user defined networks (IPv4)" ; } +add set inet ovn-kubernetes udn-open-ports-v6 { type ipv6_addr . inet_proto . inet_service ; comment "default network open ports of pods in user defined networks (IPv6)" ; } +add set inet ovn-kubernetes udn-pod-default-ips-v4 { type ipv4_addr ; comment "default network IPs of pods in user defined networks (IPv4)" ; } +add set inet ovn-kubernetes udn-pod-default-ips-v6 { type ipv6_addr ; comment "default network IPs of pods in user defined networks (IPv6)" ; } +add rule inet ovn-kubernetes udn-isolation ip daddr . meta l4proto . th dport @udn-open-ports-v4 accept +add rule inet ovn-kubernetes udn-isolation ip daddr @udn-open-ports-icmp-v4 meta l4proto icmp accept +add rule inet ovn-kubernetes udn-isolation socket cgroupv2 level 2 kubelet.slice/kubelet.service ip daddr @udn-pod-default-ips-v4 accept +add rule inet ovn-kubernetes udn-isolation ip daddr @udn-pod-default-ips-v4 drop +add rule inet ovn-kubernetes udn-isolation ip6 daddr . meta l4proto . th dport @udn-open-ports-v6 accept +add rule inet ovn-kubernetes udn-isolation ip6 daddr @udn-open-ports-icmp-v6 meta l4proto icmpv6 accept +add rule inet ovn-kubernetes udn-isolation socket cgroupv2 level 2 kubelet.slice/kubelet.service ip6 daddr @udn-pod-default-ips-v6 accept +add rule inet ovn-kubernetes udn-isolation ip6 daddr @udn-pod-default-ips-v6 drop +` + for _, ip := range v4ips { + result += fmt.Sprintf("add element inet ovn-kubernetes udn-pod-default-ips-v4 { %s }\n", ip) + } + for _, ip := range v6ips { + result += fmt.Sprintf("add element inet ovn-kubernetes udn-pod-default-ips-v6 { %s }\n", ip) + } + + return result + } + + getExpectedDumpWithOpenPorts := func(v4ips, v6ips []string, openPorts map[string][]*util.OpenPort) string { + result := getExpectedDump(v4ips, v6ips) + for ip, openPorts := range openPorts { + netIP := net.ParseIP(ip) + for _, openPort := range openPorts { + if openPort.Protocol == "icmp" { + if netIP.To4() != nil { + result += fmt.Sprintf("add element inet ovn-kubernetes udn-open-ports-icmp-v4 { %s }\n", ip) + } else { + result += fmt.Sprintf("add element inet ovn-kubernetes udn-open-ports-icmp-v6 { %s }\n", ip) + } + } else { + if netIP.To4() != nil { + result += fmt.Sprintf("add element inet ovn-kubernetes udn-open-ports-v4 { %s . %s . %d }\n", ip, openPort.Protocol, *openPort.Port) + } else { + result += fmt.Sprintf("add element inet ovn-kubernetes udn-open-ports-v6 { %s . %s . 
%d }\n", ip, openPort.Protocol, *openPort.Port) + } + } + } + } + return result + } + + start := func(objects ...runtime.Object) { + fakeClient = util.GetOVNClientset(objects...).GetNodeClientset() + var err error + wf, err = factory.NewNodeWatchFactory(fakeClient, "node1") + Expect(err).NotTo(HaveOccurred()) + + testNCM := &nad.FakeNetworkControllerManager{} + nadController, err = networkAttachDefController.NewNetAttachDefinitionController("test", testNCM, wf, nil) + Expect(err).NotTo(HaveOccurred()) + + manager = NewUDNHostIsolationManager(true, true, wf.PodCoreInformer(), nadController) + + err = wf.Start() + Expect(err).NotTo(HaveOccurred()) + err = nadController.Start() + Expect(err).NotTo(HaveOccurred()) + + // Copy manager.Start() sequence, but using fake nft and without running systemd tracker + manager.kubeletCgroupPath = "kubelet.slice/kubelet.service" + nft = nodenft.SetFakeNFTablesHelper() + manager.nft = nft + err = manager.setupUDNIsolationFromHost() + Expect(err).NotTo(HaveOccurred()) + err = controller.StartWithInitialSync(manager.podInitialSync, manager.podController) + Expect(err).NotTo(HaveOccurred()) + } + + BeforeEach(func() { + config.PrepareTestConfig() + config.OVNKubernetesFeature.EnableNetworkSegmentation = true + config.OVNKubernetesFeature.EnableMultiNetwork = true + config.IPv4Mode = true + config.IPv6Mode = true + + wf = nil + manager = nil + nadController = nil + }) + + AfterEach(func() { + if wf != nil { + wf.Shutdown() + } + if manager != nil { + manager.Stop() + } + if nadController != nil { + nadController.Stop() + } + }) + + It("correctly generates initial rules", func() { + start() + Expect(nft.Dump()).To(Equal(getExpectedDump(nil, nil))) + }) + + Context("updates pod IPs", func() { + It("on restart", func() { + start( + newPodWithIPs(nadNamespace, "pod1", true, []string{"1.1.1.1", "2014:100:200::1"}), + newPodWithIPs(nadNamespace, "pod2", true, []string{"1.1.1.2"}), + newPodWithIPs(defaultNamespace, "pod3", false, []string{"1.1.1.3"})) + err := nodenft.MatchNFTRules(getExpectedDump([]string{"1.1.1.1", "1.1.1.2"}, []string{"2014:100:200::1"}), nft.Dump()) + Expect(err).NotTo(HaveOccurred()) + }) + + It("on pod add", func() { + start( + newPodWithIPs(nadNamespace, "pod1", true, []string{"1.1.1.1", "2014:100:200::1"})) + err := nodenft.MatchNFTRules(getExpectedDump([]string{"1.1.1.1"}, []string{"2014:100:200::1"}), nft.Dump()) + Expect(err).NotTo(HaveOccurred()) + _, err = fakeClient.KubeClient.CoreV1().Pods(nadNamespace).Create(context.TODO(), + newPodWithIPs(nadNamespace, "pod2", true, []string{"1.1.1.2", "2014:100:200::2"}), metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + Eventually(func() error { + return nodenft.MatchNFTRules(getExpectedDump([]string{"1.1.1.1", "1.1.1.2"}, []string{"2014:100:200::1", "2014:100:200::2"}), nft.Dump()) + }).Should(Succeed()) + _, err = fakeClient.KubeClient.CoreV1().Pods(defaultNamespace).Create(context.TODO(), + newPodWithIPs(defaultNamespace, "pod3", false, []string{"1.1.1.3", "2014:100:200::3"}), metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + Consistently(func() error { + return nodenft.MatchNFTRules(getExpectedDump([]string{"1.1.1.1", "1.1.1.2"}, []string{"2014:100:200::1", "2014:100:200::2"}), nft.Dump()) + }).Should(Succeed()) + }) + + It("on pod delete", func() { + start( + newPodWithIPs(nadNamespace, "pod1", true, []string{"1.1.1.1", "2014:100:200::1"}), + newPodWithIPs(nadNamespace, "pod2", true, []string{"1.1.1.2", "2014:100:200::2"}), + newPodWithIPs(defaultNamespace, "pod3", 
false, []string{"1.1.1.2"})) + err := nodenft.MatchNFTRules(getExpectedDump([]string{"1.1.1.1", "1.1.1.2"}, []string{"2014:100:200::1", "2014:100:200::2"}), nft.Dump()) + Expect(err).NotTo(HaveOccurred()) + err = fakeClient.KubeClient.CoreV1().Pods(defaultNamespace).Delete(context.TODO(), "pod3", metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + Consistently(func() error { + return nodenft.MatchNFTRules(getExpectedDump([]string{"1.1.1.1", "1.1.1.2"}, []string{"2014:100:200::1", "2014:100:200::2"}), nft.Dump()) + }).Should(Succeed()) + + err = fakeClient.KubeClient.CoreV1().Pods(nadNamespace).Delete(context.TODO(), "pod2", metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + Eventually(func() error { + return nodenft.MatchNFTRules(getExpectedDump([]string{"1.1.1.1"}, []string{"2014:100:200::1"}), nft.Dump()) + }).Should(Succeed()) + }) + }) + + Context("updates open ports", func() { + intRef := func(i int) *int { + return &i + } + + It("on restart", func() { + start( + newPodWithIPs(nadNamespace, "pod1", true, []string{"1.1.1.1", "2014:100:200::1"}, util.OpenPort{Protocol: "tcp", Port: intRef(80)}), + newPodWithIPs(nadNamespace, "pod2", true, []string{"1.1.1.2"}, util.OpenPort{Protocol: "icmp"}), + newPodWithIPs(defaultNamespace, "pod3", false, []string{"1.1.1.3"})) + err := nodenft.MatchNFTRules(getExpectedDumpWithOpenPorts([]string{"1.1.1.1", "1.1.1.2"}, []string{"2014:100:200::1"}, map[string][]*util.OpenPort{ + "1.1.1.1": {{Protocol: "tcp", Port: intRef(80)}}, + "2014:100:200::1": {{Protocol: "tcp", Port: intRef(80)}}, + "1.1.1.2": {{Protocol: "icmp"}}, + }), nft.Dump()) + Expect(err).NotTo(HaveOccurred()) + }) + + It("on pod add", func() { + start( + newPodWithIPs(nadNamespace, "pod1", true, []string{"1.1.1.1", "2014:100:200::1"}, util.OpenPort{Protocol: "tcp", Port: intRef(80)})) + err := nodenft.MatchNFTRules(getExpectedDumpWithOpenPorts([]string{"1.1.1.1"}, []string{"2014:100:200::1"}, map[string][]*util.OpenPort{ + "1.1.1.1": {{Protocol: "tcp", Port: intRef(80)}}, + "2014:100:200::1": {{Protocol: "tcp", Port: intRef(80)}}, + }), nft.Dump()) + Expect(err).NotTo(HaveOccurred()) + _, err = fakeClient.KubeClient.CoreV1().Pods(nadNamespace).Create(context.TODO(), + newPodWithIPs(nadNamespace, "pod2", true, []string{"1.1.1.2", "2014:100:200::2"}, util.OpenPort{Protocol: "icmp"}), metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + Eventually(func() error { + return nodenft.MatchNFTRules(getExpectedDumpWithOpenPorts([]string{"1.1.1.1", "1.1.1.2"}, []string{"2014:100:200::1", "2014:100:200::2"}, map[string][]*util.OpenPort{ + "1.1.1.1": {{Protocol: "tcp", Port: intRef(80)}}, + "2014:100:200::1": {{Protocol: "tcp", Port: intRef(80)}}, + "1.1.1.2": {{Protocol: "icmp"}}, + "2014:100:200::2": {{Protocol: "icmp"}}, + }), nft.Dump()) + }).Should(Succeed()) + _, err = fakeClient.KubeClient.CoreV1().Pods(defaultNamespace).Create(context.TODO(), + newPodWithIPs(defaultNamespace, "pod3", false, []string{"1.1.1.3", "2014:100:200::3"}, util.OpenPort{Protocol: "icmp"}), metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + Consistently(func() error { + return nodenft.MatchNFTRules(getExpectedDumpWithOpenPorts([]string{"1.1.1.1", "1.1.1.2"}, []string{"2014:100:200::1", "2014:100:200::2"}, map[string][]*util.OpenPort{ + "1.1.1.1": {{Protocol: "tcp", Port: intRef(80)}}, + "2014:100:200::1": {{Protocol: "tcp", Port: intRef(80)}}, + "1.1.1.2": {{Protocol: "icmp"}}, + "2014:100:200::2": {{Protocol: "icmp"}}, + }), nft.Dump()) + }).Should(Succeed()) + }) + + It("on pod 
delete", func() { + start( + newPodWithIPs(nadNamespace, "pod1", true, []string{"1.1.1.1", "2014:100:200::1"}, util.OpenPort{Protocol: "tcp", Port: intRef(80)}), + newPodWithIPs(nadNamespace, "pod2", true, []string{"1.1.1.2", "2014:100:200::2"}, util.OpenPort{Protocol: "icmp"}), + newPodWithIPs(defaultNamespace, "pod3", false, []string{"1.1.1.2"})) + err := nodenft.MatchNFTRules(getExpectedDumpWithOpenPorts([]string{"1.1.1.1", "1.1.1.2"}, []string{"2014:100:200::1", "2014:100:200::2"}, map[string][]*util.OpenPort{ + "1.1.1.1": {{Protocol: "tcp", Port: intRef(80)}}, + "2014:100:200::1": {{Protocol: "tcp", Port: intRef(80)}}, + "1.1.1.2": {{Protocol: "icmp"}}, + "2014:100:200::2": {{Protocol: "icmp"}}, + }), nft.Dump()) + Expect(err).NotTo(HaveOccurred()) + err = fakeClient.KubeClient.CoreV1().Pods(defaultNamespace).Delete(context.TODO(), "pod3", metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + Consistently(func() error { + return nodenft.MatchNFTRules(getExpectedDumpWithOpenPorts([]string{"1.1.1.1", "1.1.1.2"}, []string{"2014:100:200::1", "2014:100:200::2"}, map[string][]*util.OpenPort{ + "1.1.1.1": {{Protocol: "tcp", Port: intRef(80)}}, + "2014:100:200::1": {{Protocol: "tcp", Port: intRef(80)}}, + "1.1.1.2": {{Protocol: "icmp"}}, + "2014:100:200::2": {{Protocol: "icmp"}}, + }), nft.Dump()) + }).Should(Succeed()) + + err = fakeClient.KubeClient.CoreV1().Pods(nadNamespace).Delete(context.TODO(), "pod2", metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + Eventually(func() error { + return nodenft.MatchNFTRules(getExpectedDumpWithOpenPorts([]string{"1.1.1.1"}, []string{"2014:100:200::1"}, map[string][]*util.OpenPort{ + "1.1.1.1": {{Protocol: "tcp", Port: intRef(80)}}, + "2014:100:200::1": {{Protocol: "tcp", Port: intRef(80)}}, + }), nft.Dump()) + }).Should(Succeed()) + }) + }) +}) + +func getOpenPortAnnotation(openPorts []util.OpenPort) map[string]string { + res, err := yaml.Marshal(openPorts) + Expect(err).NotTo(HaveOccurred()) + anno := make(map[string]string) + if len(res) > 0 { + anno[util.UDNOpenPortsAnnotationName] = string(res) + } + return anno +} + +// newPodWithIPs creates a new pod with the given IPs, only filled for default network. 
+func newPodWithIPs(namespace, name string, primaryUDN bool, ips []string, openPorts ...util.OpenPort) *v1.Pod { + annoPodIPs := make([]string, len(ips)) + for i, ip := range ips { + if net.ParseIP(ip).To4() != nil { + annoPodIPs[i] = "\"" + ip + "/24\"" + } else { + annoPodIPs[i] = "\"" + ip + "/64\"" + } + } + annotations := getOpenPortAnnotation(openPorts) + role := types.NetworkRolePrimary + if primaryUDN { + role = types.NetworkRoleInfrastructure + } + annotations[util.OvnPodAnnotationName] = fmt.Sprintf(`{"default": {"role": "%s", "ip_addresses":[%s], "mac_address":"0a:58:0a:f4:02:03"}}`, + role, strings.Join(annoPodIPs, ",")) + + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + UID: ktypes.UID(name), + Namespace: namespace, + Annotations: annotations, + }, + } +} diff --git a/go-controller/pkg/node/vrfmanager/vrf_manager.go b/go-controller/pkg/node/vrfmanager/vrf_manager.go index 69436e314e..8ea9793bf1 100644 --- a/go-controller/pkg/node/vrfmanager/vrf_manager.go +++ b/go-controller/pkg/node/vrfmanager/vrf_manager.go @@ -8,6 +8,7 @@ import ( "time" "github.com/containernetworking/plugins/pkg/ns" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -18,6 +19,11 @@ import ( "k8s.io/klog/v2" ) +// reconcilePeriod is the reconcile period for the VRF manager; a full sync kicks in every 60 seconds if there are no +// explicit link update events. On every link update event the timer is reset, so the next +// periodic reconcile runs another 60 seconds later. +var reconcilePeriod = 60 * time.Second + type vrf struct { name string table uint32 @@ -82,13 +88,13 @@ func (vrfm *Controller) runInternal(stopChan <-chan struct{}, doneWg *sync.WaitG go func() { defer doneWg.Done() err = currentNs.Do(func(netNS ns.NetNS) error { - linkSyncTimer := time.NewTicker(60 * time.Second) + linkSyncTimer := time.NewTicker(reconcilePeriod) defer linkSyncTimer.Stop() for { select { case linkUpdateEvent, ok := <-linkUpdateCh: - linkSyncTimer.Reset(60 * time.Second) + linkSyncTimer.Reset(reconcilePeriod) if !ok { if subscribed, linkUpdateCh, err = subscribe(); err != nil { klog.Errorf("VRF Manager: Error during netlink re-subscribe due to channel closing: %v", err) @@ -96,7 +102,7 @@ func (vrfm *Controller) runInternal(stopChan <-chan struct{}, doneWg *sync.WaitG continue } ifName := linkUpdateEvent.Link.Attrs().Name - klog.V(3).Infof("VRF Manager: link update received for interface %s", ifName) + klog.V(5).Infof("VRF Manager: link update received for interface %s", ifName) err = vrfm.syncVRF(linkUpdateEvent.Link) if err != nil { klog.Errorf("VRF Manager: Error syncing link %s update event, err: %v", ifName, err) @@ -268,6 +274,26 @@ func (vrfm *Controller) AddVRF(name string, slaveInterface string, table uint32, return vrfm.sync(vrfDev) } +// AddVRFRoutes adds routes to the specified VRF +func (vrfm *Controller) AddVRFRoutes(name string, routes []netlink.Route) error { + vrfm.mu.Lock() + defer vrfm.mu.Unlock() + + vrfLink, err := util.GetNetLinkOps().LinkByName(name) + if err != nil { + return fmt.Errorf("failed to retrieve VRF device %s, err: %v", name, err) + } + + vrfDev, ok := vrfm.vrfs[vrfLink.Attrs().Index] + if !ok { + return fmt.Errorf("failed to find VRF %s", name) + } + + vrfDev.routes = append(vrfDev.routes, routes...) + + return vrfm.sync(vrfDev) +} + // Repair deletes stale VRF device(s) on the host. This helps remove // device(s) for which DeleteVRF is never invoked. 
// Assumptions: 1) The validVRFs list must contain device for which AddVRF @@ -293,11 +319,12 @@ func (vrfm *Controller) repair(validVRFs sets.Set[string]) error { !strings.HasPrefix(name, types.UDNVRFDevicePrefix) { continue } - if !validVRFs.Has(name) { - err = util.GetNetLinkOps().LinkDelete(link) - if err != nil { - klog.Errorf("VRF Manager: error deleting stale VRF device %s, err: %v", name, err) - } + if validVRFs.Has(name) { + continue + } + err = util.GetNetLinkOps().LinkDelete(link) + if err != nil { + klog.Errorf("VRF Manager: error deleting stale VRF device %s, err: %v", name, err) } delete(vrfm.vrfs, link.Attrs().Index) } diff --git a/go-controller/pkg/node/vrfmanager/vrf_manager_suite_test.go b/go-controller/pkg/node/vrfmanager/vrf_manager_suite_test.go index 70a2b09baf..0d465c4235 100644 --- a/go-controller/pkg/node/vrfmanager/vrf_manager_suite_test.go +++ b/go-controller/pkg/node/vrfmanager/vrf_manager_suite_test.go @@ -3,7 +3,7 @@ package vrfmanager import ( "testing" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/node/vrfmanager/vrf_manager_test.go b/go-controller/pkg/node/vrfmanager/vrf_manager_test.go index cc1292f1e2..179c188deb 100644 --- a/go-controller/pkg/node/vrfmanager/vrf_manager_test.go +++ b/go-controller/pkg/node/vrfmanager/vrf_manager_test.go @@ -2,10 +2,15 @@ package vrfmanager import ( "fmt" + "sync" + "time" - "github.com/onsi/ginkgo" + "github.com/containernetworking/plugins/pkg/ns" + "github.com/containernetworking/plugins/pkg/testutils" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" + ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" netlink_mocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/mocks/github.com/vishvananda/netlink" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/mocks" @@ -14,14 +19,17 @@ import ( "k8s.io/apimachinery/pkg/util/sets" ) +var ( + c *Controller + vrfLinkName1 = "mp100-udn-vrf" + vrfLinkName2 = "mp200-udn-vrf" +) + var _ = ginkgo.Describe("VRF manager", func() { var ( - c *Controller - vrfLinkName1 = "100-vrf" enslaveLinkName1 = "dev100" enslaveLinkName2 = "dev101" - vrfLinkName2 = "200-vrf" nlMock *mocks.NetLinkOps vrfLinkMock1 *netlink_mocks.Link enslaveLinkMock1 *netlink_mocks.Link @@ -116,6 +124,9 @@ var _ = ginkgo.Describe("VRF manager", func() { enslaveLinkMock1.On("Type").Return("dummy") err = c.reconcile() gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + // Invoke reconcile again to ensure both vrf links in sync. + err = c.reconcile() + gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) }) ginkgo.It("repair VRFs", func() { @@ -130,3 +141,98 @@ var _ = ginkgo.Describe("VRF manager", func() { }) }) }) + +var _ = ginkgo.Describe("VRF manager tests with a network namespace", func() { + var ( + testNS ns.NetNS + stopCh chan struct{} + wg *sync.WaitGroup + ) + ginkgo.BeforeEach(func() { + var err error + testNS, err = testutils.NewNS() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + wg = &sync.WaitGroup{} + stopCh = make(chan struct{}) + routeManager := routemanager.NewController() + wg.Add(1) + go testNS.Do(func(netNS ns.NetNS) error { + defer wg.Done() + routeManager.Run(stopCh, 2*time.Minute) + return nil + }) + // set vrf manager reconcile period into one second. 
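The reconcilePeriod assignment just below shortens the VRF manager's periodic sync for the test. The loop pattern being exercised, reduced to a standalone sketch (names illustrative): a ticker drives the full reconcile, and every link event both syncs one link and resets the ticker, postponing the next full pass.

package main

import (
	"fmt"
	"time"
)

func main() {
	reconcilePeriod := 50 * time.Millisecond
	events := make(chan string, 1)
	events <- "dev100"
	ticker := time.NewTicker(reconcilePeriod)
	defer ticker.Stop()
	deadline := time.After(200 * time.Millisecond)
	for {
		select {
		case ev := <-events:
			ticker.Reset(reconcilePeriod) // an event postpones the next full sync
			fmt.Println("sync single link:", ev)
		case <-ticker.C:
			fmt.Println("periodic full reconcile")
		case <-deadline:
			return
		}
	}
}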
+ reconcilePeriod = 1 * time.Second + c = NewController(routeManager) + wg2 := &sync.WaitGroup{} + defer func() { + wg2.Wait() + }() + wg2.Add(1) + go testNS.Do(func(netNS ns.NetNS) error { + defer func() { + ginkgo.GinkgoRecover() + wg2.Done() + }() + err = c.Run(stopCh, wg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return nil + }) + }) + ginkgo.AfterEach(func() { + close(stopCh) + wg.Wait() + gomega.Expect(testNS.Close()).To(gomega.Succeed()) + gomega.Expect(testutils.UnmountNS(testNS)).To(gomega.Succeed()) + util.ResetRunner() + }) + + checkForVrfLinkExistence := func() error { + err := testNS.Do(func(ns.NetNS) error { + if _, err := util.GetNetLinkOps().LinkByName(vrfLinkName1); err != nil { + return err + } + _, err := util.GetNetLinkOps().LinkByName(vrfLinkName2) + return err + }) + return err + } + + ovntest.OnSupportedPlatformsIt("ensure VRF manager is reconciling configured VRF devices correctly", func() { + err := testNS.Do(func(ns.NetNS) error { + defer ginkgo.GinkgoRecover() + err := c.AddVRF(vrfLinkName1, "", 10, nil) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + err = c.AddVRF(vrfLinkName2, "", 20, nil) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + + wg3 := &sync.WaitGroup{} + wg3.Add(1) + go func() { + defer func() { + ginkgo.GinkgoRecover() + wg3.Done() + }() + // wait long enough for the reconcile loop to have run a few times. + time.Sleep(5 * time.Second) + err = checkForVrfLinkExistence() + gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + }() + wg3.Wait() + + // Invoke the reconcile method explicitly a few times to ensure it keeps working. + err = c.reconcile() + gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + err = checkForVrfLinkExistence() + gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + err = c.reconcile() + gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + err = checkForVrfLinkExistence() + gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + + return nil + }) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + }) +}) diff --git a/go-controller/pkg/observability/observability.go b/go-controller/pkg/observability/observability.go new file mode 100644 index 0000000000..6349c15199 --- /dev/null +++ b/go-controller/pkg/observability/observability.go @@ -0,0 +1,303 @@ +package observability + +import ( + "fmt" + "slices" + "strings" + "sync" + "time" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" + + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +// OVN observability app IDs. Make sure to always add new apps at the end. +const ( + DropSamplingID = iota + 1 + ACLNewTrafficSamplingID + ACLEstTrafficSamplingID +) + +// temporary const, until we have dynamic config +const DefaultObservabilityCollectorSetID = 42 + +// this is inferred from the nbdb schema, see Sample_Collector.id +const maxCollectorID = 255 const collectorFeaturesExternalID = "sample-features" + +// collectorConfig holds the configuration for a collector. +// Every feature may be given its own probability. +// collectorSetID is used to set up sampling via OVSDB. 
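A note on the probability units before the struct below: OVSDB stores a collector's probability as a 0-65535 fraction, while collectorConfig takes percents. The integer conversion (percentToProbability further down in this file) maps 100% to 65535 and 50% to 32767, which is exactly what the tests later in this diff assert:

package main

import "fmt"

// percentToProbability is the same integer conversion used in this file.
func percentToProbability(percent int) int {
	return 65535 * percent / 100
}

func main() {
	for _, p := range []int{100, 50, 1} {
		fmt.Printf("%d%% -> %d\n", p, percentToProbability(p))
	}
	// 100% -> 65535, 50% -> 32767, 1% -> 655
}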
+type collectorConfig struct { + collectorSetID int + // probability in percent, 0 to 100 + featuresProbability map[libovsdbops.SampleFeature]int +} + +type Manager struct { + nbClient libovsdbclient.Client + sampConfig *libovsdbops.SamplingConfig + collectorsLock sync.Mutex + // nbdb Collectors have probability. To allow different probabilities for different features, + // multiple nbdb Collectors will be created, one per probability. + // getCollectorKey() => collector.UUID + dbCollectors map[string]string + // cleaning up unused collectors may take time and multiple retries, as all referencing samples must be removed first. + // Therefore, we need to save state between those retries. + // getCollectorKey() => collector.SetID + unusedCollectors map[string]int + unusedCollectorsRetryInterval time.Duration + collectorsCleanupRetries int + // Only maxCollectorID collectors are allowed, each should have unique ID. + // this set is tracking already assigned IDs. + takenCollectorIDs sets.Set[int] +} + +func NewManager(nbClient libovsdbclient.Client) *Manager { + return &Manager{ + nbClient: nbClient, + collectorsLock: sync.Mutex{}, + dbCollectors: make(map[string]string), + unusedCollectors: make(map[string]int), + unusedCollectorsRetryInterval: time.Minute, + takenCollectorIDs: sets.New[int](), + } +} + +func (m *Manager) SamplingConfig() *libovsdbops.SamplingConfig { + return m.sampConfig +} + +func (m *Manager) Init() error { + // this will be read from the kube-api in the future + currentConfig := &collectorConfig{ + collectorSetID: DefaultObservabilityCollectorSetID, + featuresProbability: map[libovsdbops.SampleFeature]int{ + libovsdbops.EgressFirewallSample: 100, + libovsdbops.NetworkPolicySample: 100, + libovsdbops.AdminNetworkPolicySample: 100, + libovsdbops.MulticastSample: 100, + libovsdbops.UDNIsolationSample: 100, + }, + } + + return m.initWithConfig(currentConfig) +} + +func (m *Manager) initWithConfig(config *collectorConfig) error { + if err := m.setSamplingAppIDs(); err != nil { + return err + } + if err := m.setDbCollectors(); err != nil { + return err + } + + featuresConfig, err := m.addCollector(config) + if err != nil { + return err + } + m.sampConfig = libovsdbops.NewSamplingConfig(featuresConfig) + + // now cleanup stale collectors + m.deleteStaleCollectorsWithRetry() + return nil +} + +func (m *Manager) setDbCollectors() error { + m.collectorsLock.Lock() + defer m.collectorsLock.Unlock() + clear(m.dbCollectors) + collectors, err := libovsdbops.ListSampleCollectors(m.nbClient) + if err != nil { + return fmt.Errorf("error getting sample collectors: %w", err) + } + for _, collector := range collectors { + collectorKey := getCollectorKey(collector.SetID, collector.Probability) + m.dbCollectors[collectorKey] = collector.UUID + m.takenCollectorIDs.Insert(collector.ID) + // all collectors are unused, until we update existing configs + m.unusedCollectors[collectorKey] = collector.ID + } + return nil +} + +// Stale collectors can't be deleted until all referencing Samples are deleted. +// Samples will be deleted asynchronously by different controllers on their init with the new Manager. +// deleteStaleCollectorsWithRetry will retry, considering deletion should eventually succeed when all controllers +// update their db entries to use the latest observability config. 
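The retry scheme described above, boiled down to a standalone sketch (hypothetical names): each failed cleanup re-arms itself with time.AfterFunc, so retries are non-blocking and stop once the budget is exhausted or a run succeeds.

package main

import (
	"errors"
	"fmt"
	"time"
)

func main() {
	const maxRetries = 3
	retries := 0
	done := make(chan struct{})
	var attempt func()
	attempt = func() {
		if err := cleanup(retries); err != nil {
			retries++
			if retries > maxRetries { // give up until the next restart
				close(done)
				return
			}
			time.AfterFunc(10*time.Millisecond, attempt) // re-arm, non-blocking
			return
		}
		close(done)
	}
	attempt()
	<-done
	fmt.Println("attempts made:", retries+1)
}

// cleanup fails while samples still reference the stale collectors.
func cleanup(attempt int) error {
	if attempt < 2 {
		return errors.New("collectors still referenced by samples")
	}
	return nil
}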
+func (m *Manager) deleteStaleCollectorsWithRetry() { + if err := m.deleteStaleCollectors(); err != nil { + m.collectorsCleanupRetries += 1 + // allow retries for 1 hour, hopefully it will be enough for all handlers to complete their initial sync + if m.collectorsCleanupRetries > 60 { + m.collectorsCleanupRetries = 0 + klog.Errorf("Cleanup stale collectors failed after 60 retries: %v", err) + return + } + time.AfterFunc(m.unusedCollectorsRetryInterval, m.deleteStaleCollectorsWithRetry) + return + } + m.collectorsCleanupRetries = 0 + klog.Infof("Cleanup stale collectors succeeded.") +} + +func (m *Manager) deleteStaleCollectors() error { + m.collectorsLock.Lock() + defer m.collectorsLock.Unlock() + var lastErr error + for collectorKey, collectorID := range m.unusedCollectors { + collectorUUID := m.dbCollectors[collectorKey] + err := libovsdbops.DeleteSampleCollector(m.nbClient, &nbdb.SampleCollector{ + UUID: collectorUUID, + }) + if err != nil { + lastErr = err + klog.Infof("Error deleting collector with ID=%d: %v", collectorID, lastErr) + continue + } + delete(m.unusedCollectors, collectorKey) + delete(m.dbCollectors, collectorKey) + delete(m.takenCollectorIDs, collectorID) + } + return lastErr +} + +// Cleanup must be called when observability is no longer needed. +// It will return an error if some samples still exist in the db. +// This is expected, and Cleanup may be retried on the next restart. +func Cleanup(nbClient libovsdbclient.Client) error { + // Do the opposite of init + err := libovsdbops.DeleteSamplingAppsWithPredicate(nbClient, func(app *nbdb.SamplingApp) bool { + return true + }) + if err != nil { + return fmt.Errorf("error deleting sampling apps: %w", err) + } + + err = libovsdbops.DeleteSampleCollectorWithPredicate(nbClient, func(collector *nbdb.SampleCollector) bool { + return true + }) + if err != nil { + return fmt.Errorf("error deleting sample collectors: %w", err) + } + return nil +} + +func (m *Manager) setSamplingAppIDs() error { + var ops []libovsdb.Operation + var err error + for _, appConfig := range []struct { + id int + appType nbdb.SamplingAppType + }{ + { + id: DropSamplingID, + appType: nbdb.SamplingAppTypeDrop, + }, + { + id: ACLNewTrafficSamplingID, + appType: nbdb.SamplingAppTypeACLNew, + }, + { + id: ACLEstTrafficSamplingID, + appType: nbdb.SamplingAppTypeACLEst, + }, + } { + samplingApp := &nbdb.SamplingApp{ + ID: appConfig.id, + Type: appConfig.appType, + } + ops, err = libovsdbops.CreateOrUpdateSamplingAppsOps(m.nbClient, ops, samplingApp) + if err != nil { + return fmt.Errorf("error creating or updating sampling app %s: %w", appConfig.appType, err) + } + } + _, err = libovsdbops.TransactAndCheck(m.nbClient, ops) + return err +} + +func groupByProbability(c *collectorConfig) map[int][]libovsdbops.SampleFeature { + probabilities := make(map[int][]libovsdbops.SampleFeature) + for feature, percentProbability := range c.featuresProbability { + probability := percentToProbability(percentProbability) + probabilities[probability] = append(probabilities[probability], feature) + } + return probabilities +} + +func getCollectorKey(collectorSetID int, probability int) string { + return fmt.Sprintf("%d-%d", collectorSetID, probability) +} + +func (m *Manager) getFreeCollectorID() (int, error) { + for i := 1; i <= maxCollectorID; i++ { + if !m.takenCollectorIDs.Has(i) { + return i, nil + } + } + return 0, fmt.Errorf("no free collector IDs") +} + +func (m *Manager) addCollector(conf *collectorConfig) (map[libovsdbops.SampleFeature][]string, error) { + 
m.collectorsLock.Lock() + defer m.collectorsLock.Unlock() + sampleFeaturesConfig := make(map[libovsdbops.SampleFeature][]string) + probabilityConfig := groupByProbability(conf) + + for probability, features := range probabilityConfig { + collectorKey := getCollectorKey(conf.collectorSetID, probability) + var collectorUUID string + var ok bool + // ensure predictable externalID + slices.Sort(features) + collectorFeatures := strings.Join(features, ",") + if collectorUUID, ok = m.dbCollectors[collectorKey]; !ok { + collectorID, err := m.getFreeCollectorID() + if err != nil { + return sampleFeaturesConfig, err + } + collector := &nbdb.SampleCollector{ + ID: collectorID, + SetID: conf.collectorSetID, + Probability: probability, + ExternalIDs: map[string]string{ + collectorFeaturesExternalID: collectorFeatures, + }, + } + err = libovsdbops.CreateOrUpdateSampleCollector(m.nbClient, collector) + if err != nil { + return sampleFeaturesConfig, err + } + collectorUUID = collector.UUID + m.dbCollectors[collectorKey] = collectorUUID + m.takenCollectorIDs.Insert(collectorID) + } else { + // update collector's features + collector := &nbdb.SampleCollector{ + UUID: collectorUUID, + ExternalIDs: map[string]string{ + collectorFeaturesExternalID: collectorFeatures, + }, + } + err := libovsdbops.UpdateSampleCollectorExternalIDs(m.nbClient, collector) + if err != nil { + return sampleFeaturesConfig, err + } + // collector is used, remove from unused Collectors + delete(m.unusedCollectors, collectorKey) + } + for _, feature := range features { + sampleFeaturesConfig[feature] = append(sampleFeaturesConfig[feature], collectorUUID) + } + } + return sampleFeaturesConfig, nil +} + +func percentToProbability(percent int) int { + return 65535 * percent / 100 +} diff --git a/go-controller/pkg/observability/observability_suite_test.go b/go-controller/pkg/observability/observability_suite_test.go new file mode 100644 index 0000000000..8aa403e834 --- /dev/null +++ b/go-controller/pkg/observability/observability_suite_test.go @@ -0,0 +1,13 @@ +package observability + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestObservabilityManager(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Observability Manager Suite") +} diff --git a/go-controller/pkg/observability/observability_test.go b/go-controller/pkg/observability/observability_test.go new file mode 100644 index 0000000000..1550b5cc09 --- /dev/null +++ b/go-controller/pkg/observability/observability_test.go @@ -0,0 +1,374 @@ +package observability + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "strings" + "time" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" +) + +var _ = Describe("Observability Manager", func() { + var ( + nbClient libovsdbclient.Client + libovsdbCleanup *libovsdbtest.Context + manager *Manager + initialDB []libovsdbtest.TestData + samplingApps []libovsdbtest.TestData + ) + + const collectorUUID = "collector-uuid" + + startManager := func(data []libovsdbtest.TestData) { + var err error + nbClient, _, libovsdbCleanup, err = libovsdbtest.NewNBSBTestHarness(libovsdbtest.TestSetup{ + NBData: data}) + Expect(err).NotTo(HaveOccurred()) + manager = NewManager(nbClient) + err = manager.Init() + Expect(err).NotTo(HaveOccurred()) + } + + createACLWithPortGroup := func(acl *nbdb.ACL) *nbdb.PortGroup { + ops, err := libovsdbops.CreateOrUpdateACLsOps(nbClient, nil, manager.SamplingConfig(), acl) + Expect(err).NotTo(HaveOccurred()) + pg := &nbdb.PortGroup{ + UUID: "pg-uuid", + ACLs: []string{acl.UUID}, + } + ops, err = libovsdbops.CreateOrUpdatePortGroupsOps(nbClient, ops, pg) + Expect(err).NotTo(HaveOccurred()) + _, err = libovsdbops.TransactAndCheck(nbClient, ops) + Expect(err).NotTo(HaveOccurred()) + return pg + } + + // createOrUpdateACLPreserveUUID calls CreateOrUpdateACLs and sets the acl.UUID back. + // that is required as setting real UUID breaks libovsdb matching + createOrUpdateACLPreserveUUID := func(nbClient libovsdbclient.Client, samplingConfig *libovsdbops.SamplingConfig, acl *nbdb.ACL) error { + namedUUID := acl.UUID + err := libovsdbops.CreateOrUpdateACLs(nbClient, samplingConfig, acl) + acl.UUID = namedUUID + return err + } + + BeforeEach(func() { + initialDB = []libovsdbtest.TestData{ + &nbdb.SamplingApp{ + UUID: "drop-sampling-uuid", + ID: DropSamplingID, + Type: nbdb.SamplingAppTypeDrop, + }, + &nbdb.SamplingApp{ + UUID: "acl-new-traffic-sampling-uuid", + ID: ACLNewTrafficSamplingID, + Type: nbdb.SamplingAppTypeACLNew, + }, + &nbdb.SamplingApp{ + UUID: "acl-est-traffic-sampling-uuid", + ID: ACLEstTrafficSamplingID, + Type: nbdb.SamplingAppTypeACLEst, + }, + &nbdb.SampleCollector{ + UUID: collectorUUID, + ID: 1, + SetID: DefaultObservabilityCollectorSetID, + Probability: 65535, + ExternalIDs: map[string]string{ + collectorFeaturesExternalID: strings.Join([]string{libovsdbops.AdminNetworkPolicySample, libovsdbops.EgressFirewallSample, + libovsdbops.MulticastSample, libovsdbops.NetworkPolicySample, libovsdbops.UDNIsolationSample}, ","), + }, + }, + } + + samplingApps = initialDB[:3] + }) + + AfterEach(func() { + if libovsdbCleanup != nil { + libovsdbCleanup.Cleanup() + } + }) + + for _, dbSetup := range [][]libovsdbtest.TestData{ + nil, initialDB, + } { + msg := "db is empty" + if dbSetup != nil { + msg = "db is not empty" + } + When(msg, func() { + + It("should initialize database", func() { + startManager(dbSetup) + Eventually(nbClient).Should(libovsdbtest.HaveData(initialDB)) + }) + + It("should cleanup database", func() { + startManager(dbSetup) + Eventually(nbClient).Should(libovsdbtest.HaveData(initialDB)) + err := Cleanup(nbClient) + Expect(err).NotTo(HaveOccurred()) + Eventually(nbClient).Should(libovsdbtest.HaveEmptyData()) + }) + + It("should return correct collectors for an ACL, when feature is enabled", func() { + startManager(dbSetup) + + acl := &nbdb.ACL{ + UUID: "acl-uuid", + 
ExternalIDs: map[string]string{ + // NetworkPolicy is enabled by default + libovsdbops.OwnerTypeKey.String(): libovsdbops.NetworkPolicyOwnerType, + }, + } + pg := createACLWithPortGroup(acl) + + sample := &nbdb.Sample{ + UUID: "sample-uuid", + Metadata: int(libovsdbops.GetACLSampleID(acl)), + Collectors: []string{collectorUUID}, + } + acl.SampleNew = &sample.UUID + acl.SampleEst = &sample.UUID + + Eventually(nbClient).Should(libovsdbtest.HaveData(append(initialDB, sample, pg, acl))) + }) + It("should return correct collectors for an ACL, when feature is disabled", func() { + startManager(dbSetup) + acl := &nbdb.ACL{ + UUID: "acl-uuid", + ExternalIDs: map[string]string{ + // disabled-feature doesn't exist => not enabled + libovsdbops.OwnerTypeKey.String(): "disabled-feature", + }, + } + pg := createACLWithPortGroup(acl) + + Eventually(nbClient).Should(libovsdbtest.HaveData(append(initialDB, pg, acl))) + }) + }) + } + + It("should update existing ACL, when feature is enabled", func() { + // start with ACL that doesn't have samples + acl := &nbdb.ACL{ + UUID: "acl-uuid", + ExternalIDs: map[string]string{ + // NetworkPolicy is enabled by default + libovsdbops.OwnerTypeKey.String(): libovsdbops.NetworkPolicyOwnerType, + }, + } + pg := &nbdb.PortGroup{ + UUID: "pg-uuid", + ACLs: []string{acl.UUID}, + } + startManager(append(initialDB, acl, pg)) + + err := createOrUpdateACLPreserveUUID(nbClient, manager.SamplingConfig(), acl) + Expect(err).NotTo(HaveOccurred()) + // expect sample to be added to the existing acl + sample := &nbdb.Sample{ + UUID: "sample-uuid", + Metadata: int(libovsdbops.GetACLSampleID(acl)), + Collectors: []string{collectorUUID}, + } + acl.SampleNew = &sample.UUID + acl.SampleEst = &sample.UUID + Eventually(nbClient).Should(libovsdbtest.HaveData(append(initialDB, sample, pg, acl))) + }) + + It("should update existing ACL, when feature is disabled", func() { + // start with ACL that has samples + acl := &nbdb.ACL{ + UUID: "acl-uuid", + ExternalIDs: map[string]string{ + // disabled-feature doesn't exist => not enabled + libovsdbops.OwnerTypeKey.String(): "disabled-feature", + }, + } + pg := &nbdb.PortGroup{ + UUID: "pg-uuid", + ACLs: []string{acl.UUID}, + } + sample := &nbdb.Sample{ + UUID: "sample-uuid", + Metadata: int(libovsdbops.GetACLSampleID(acl)), + Collectors: []string{collectorUUID}, + } + acl.SampleNew = &sample.UUID + acl.SampleEst = &sample.UUID + startManager(append(initialDB, sample, acl, pg)) + + err := createOrUpdateACLPreserveUUID(nbClient, manager.SamplingConfig(), acl) + Expect(err).NotTo(HaveOccurred()) + // expect sample to be removed from the existing acl + acl.SampleNew = nil + acl.SampleEst = nil + + Eventually(nbClient).Should(libovsdbtest.HaveData(append(initialDB, pg, acl))) + }) + + It("should generate new sampleID on ACL action change", func() { + startManager(initialDB) + acl := &nbdb.ACL{ + UUID: "acl-uuid", + Action: nbdb.ACLActionAllowRelated, + ExternalIDs: map[string]string{ + // NetworkPolicy is enabled by default + libovsdbops.OwnerTypeKey.String(): libovsdbops.NetworkPolicyOwnerType, + }, + } + createACLWithPortGroup(acl) + + // find sample by ACL and save sampleID + acls, err := libovsdbops.FindACLs(nbClient, []*nbdb.ACL{acl}) + Expect(err).NotTo(HaveOccurred()) + Expect(acls).To(HaveLen(1)) + sample, err := libovsdbops.GetSample(nbClient, &nbdb.Sample{ + UUID: *acls[0].SampleNew, + }) + Expect(err).NotTo(HaveOccurred()) + sampleID := sample.Metadata + + // update acl Action + acl.Action = nbdb.ACLActionDrop + err = 
createOrUpdateACLPreserveUUID(nbClient, manager.SamplingConfig(), acl) + Expect(err).NotTo(HaveOccurred()) + + // find new sampleID + acls, err = libovsdbops.FindACLs(nbClient, []*nbdb.ACL{acl}) + Expect(err).NotTo(HaveOccurred()) + Expect(acls).To(HaveLen(1)) + sample, err = libovsdbops.GetSample(nbClient, &nbdb.Sample{ + UUID: *acls[0].SampleNew, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(sample.Metadata).NotTo(Equal(sampleID)) + }) + + When("non-default config is used", func() { + startManagerWithConfig := func(data []libovsdbtest.TestData, config *collectorConfig) { + var err error + nbClient, _, libovsdbCleanup, err = libovsdbtest.NewNBSBTestHarness(libovsdbtest.TestSetup{ + NBData: data}) + Expect(err).NotTo(HaveOccurred()) + manager = NewManager(nbClient) + // tweak retry interval for testing + manager.unusedCollectorsRetryInterval = time.Second + err = manager.initWithConfig(config) + Expect(err).NotTo(HaveOccurred()) + } + + It("should update stale collectors", func() { + // tweakedConfig doesn't have EgressFirewall enabled, and sets different probability for NetworkPolicy + tweakedConfig := &collectorConfig{ + collectorSetID: DefaultObservabilityCollectorSetID, + featuresProbability: map[libovsdbops.SampleFeature]int{ + libovsdbops.NetworkPolicySample: 50, + libovsdbops.AdminNetworkPolicySample: 100, + libovsdbops.MulticastSample: 100, + libovsdbops.UDNIsolationSample: 100, + }, + } + startManagerWithConfig(initialDB, tweakedConfig) + expectedDB := append(samplingApps, + &nbdb.SampleCollector{ + UUID: collectorUUID, + ID: 1, + SetID: DefaultObservabilityCollectorSetID, + Probability: 65535, + ExternalIDs: map[string]string{ + collectorFeaturesExternalID: strings.Join([]string{libovsdbops.AdminNetworkPolicySample, + libovsdbops.MulticastSample, libovsdbops.UDNIsolationSample}, ","), + }, + }, + &nbdb.SampleCollector{ + UUID: collectorUUID + "-2", + ID: 2, + SetID: DefaultObservabilityCollectorSetID, + Probability: 32767, + ExternalIDs: map[string]string{ + collectorFeaturesExternalID: libovsdbops.NetworkPolicySample, + }, + }, + ) + Eventually(nbClient).Should(libovsdbtest.HaveData(expectedDB)) + }) + It("should cleanup stale collectors", func() { + // tweakedConfig doesn't have probability used by existing collector + tweakedConfig := &collectorConfig{ + collectorSetID: DefaultObservabilityCollectorSetID, + featuresProbability: map[libovsdbops.SampleFeature]int{ + libovsdbops.NetworkPolicySample: 50, + }, + } + + startManagerWithConfig(initialDB, tweakedConfig) + expectedDB := append(samplingApps, + &nbdb.SampleCollector{ + UUID: collectorUUID + "-2", + ID: 2, + SetID: DefaultObservabilityCollectorSetID, + Probability: 32767, + ExternalIDs: map[string]string{ + collectorFeaturesExternalID: libovsdbops.NetworkPolicySample, + }, + }, + ) + Eventually(nbClient).Should(libovsdbtest.HaveData(expectedDB)) + }) + It("should cleanup stale collectors after samples are removed", func() { + // tweakedConfig doesn't have probability used by existing collector + tweakedConfig := &collectorConfig{ + collectorSetID: DefaultObservabilityCollectorSetID, + featuresProbability: map[libovsdbops.SampleFeature]int{ + libovsdbops.EgressFirewallSample: 50, + }, + } + acl := &nbdb.ACL{ + UUID: "acl-uuid", + ExternalIDs: map[string]string{ + // NetworkPolicy is enabled by default + libovsdbops.OwnerTypeKey.String(): libovsdbops.NetworkPolicyOwnerType, + }, + } + pg := &nbdb.PortGroup{ + UUID: "pg-uuid", + ACLs: []string{acl.UUID}, + } + sample := &nbdb.Sample{ + UUID: "sample-uuid", + Metadata: 
int(libovsdbops.GetACLSampleID(acl)), + Collectors: []string{collectorUUID}, + } + acl.SampleNew = &sample.UUID + acl.SampleEst = &sample.UUID + testInitialDB := append(initialDB, sample, pg, acl) + + startManagerWithConfig(testInitialDB, tweakedConfig) + newCollector := &nbdb.SampleCollector{ + UUID: collectorUUID + "-2", + ID: 2, + SetID: DefaultObservabilityCollectorSetID, + Probability: 32767, + ExternalIDs: map[string]string{ + collectorFeaturesExternalID: libovsdbops.EgressFirewallSample, + }, + } + // initial collector will fail to be cleaned up, since acl sample still references that collector + expectedDB := append(testInitialDB, newCollector) + Consistently(nbClient).Should(libovsdbtest.HaveData(expectedDB)) + // now imitate netpol handler initialization by updating acl sample. + err := createOrUpdateACLPreserveUUID(nbClient, manager.SamplingConfig(), acl) + Expect(err).NotTo(HaveOccurred()) + // sample is removed, collector should be cleaned up now + expectedDB = append(samplingApps, pg, acl, newCollector) + Eventually(nbClient, 2*manager.unusedCollectorsRetryInterval).Should(libovsdbtest.HaveData(expectedDB)) + }) + }) +}) diff --git a/go-controller/pkg/ovn/address_set/address_set_suite_test.go b/go-controller/pkg/ovn/address_set/address_set_suite_test.go index 623b843b7a..6cd15119c7 100644 --- a/go-controller/pkg/ovn/address_set/address_set_suite_test.go +++ b/go-controller/pkg/ovn/address_set/address_set_suite_test.go @@ -3,7 +3,7 @@ package addressset import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/ovn/address_set/address_set_test.go b/go-controller/pkg/ovn/address_set/address_set_test.go index e0483dc90b..c4563738b4 100644 --- a/go-controller/pkg/ovn/address_set/address_set_test.go +++ b/go-controller/pkg/ovn/address_set/address_set_test.go @@ -1,7 +1,7 @@ package addressset import ( - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/urfave/cli/v2" diff --git a/go-controller/pkg/ovn/admin_network_policy_test.go b/go-controller/pkg/ovn/admin_network_policy_test.go index e3ad7dd6e2..1716ebfdcb 100644 --- a/go-controller/pkg/ovn/admin_network_policy_test.go +++ b/go-controller/pkg/ovn/admin_network_policy_test.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/base_event_handler.go b/go-controller/pkg/ovn/base_event_handler.go index 3fe5f13c74..5422a4dd1b 100644 --- a/go-controller/pkg/ovn/base_event_handler.go +++ b/go-controller/pkg/ovn/base_event_handler.go @@ -213,6 +213,10 @@ func (h *baseNetworkControllerEventHandler) isObjectInTerminalState(objType refl func (h *baseNetworkControllerEventHandler) recordAddEvent(objType reflect.Type, obj interface{}) { switch objType { + case factory.PolicyType: + np := obj.(*knet.NetworkPolicy) + klog.V(5).Infof("Recording add event on network policy %s/%s", np.Namespace, np.Name) + metrics.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) case factory.MultiNetworkPolicyType: mnp := obj.(*mnpapi.MultiNetworkPolicy) klog.V(5).Infof("Recording add event on multinetwork policy %s/%s", mnp.Namespace, mnp.Name) @@ -223,6 +227,10 @@ func (h *baseNetworkControllerEventHandler) recordAddEvent(objType reflect.Type, // RecordUpdateEvent records 
the udpate event on this given object. func (h *baseNetworkControllerEventHandler) recordUpdateEvent(objType reflect.Type, obj interface{}) { switch objType { + case factory.PolicyType: + np := obj.(*knet.NetworkPolicy) + klog.V(5).Infof("Recording update event on network policy %s/%s", np.Namespace, np.Name) + metrics.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) case factory.MultiNetworkPolicyType: mnp := obj.(*mnpapi.MultiNetworkPolicy) klog.V(5).Infof("Recording update event on multinetwork policy %s/%s", mnp.Namespace, mnp.Name) @@ -233,6 +241,10 @@ func (h *baseNetworkControllerEventHandler) recordUpdateEvent(objType reflect.Ty // RecordDeleteEvent records the delete event on this given object. func (h *baseNetworkControllerEventHandler) recordDeleteEvent(objType reflect.Type, obj interface{}) { switch objType { + case factory.PolicyType: + np := obj.(*knet.NetworkPolicy) + klog.V(5).Infof("Recording delete event on network policy %s/%s", np.Namespace, np.Name) + metrics.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) case factory.MultiNetworkPolicyType: mnp := obj.(*mnpapi.MultiNetworkPolicy) klog.V(5).Infof("Recording delete event on multinetwork policy %s/%s", mnp.Namespace, mnp.Name) @@ -243,6 +255,10 @@ func (h *baseNetworkControllerEventHandler) recordDeleteEvent(objType reflect.Ty // RecordSuccessEvent records the success event on this given object. func (h *baseNetworkControllerEventHandler) recordSuccessEvent(objType reflect.Type, obj interface{}) { switch objType { + case factory.PolicyType: + np := obj.(*knet.NetworkPolicy) + klog.V(5).Infof("Recording success event on network policy %s/%s", np.Namespace, np.Name) + metrics.GetConfigDurationRecorder().End("networkpolicy", np.Namespace, np.Name) case factory.MultiNetworkPolicyType: mnp := obj.(*mnpapi.MultiNetworkPolicy) klog.V(5).Infof("Recording success event on multinetwork policy %s/%s", mnp.Namespace, mnp.Name) diff --git a/go-controller/pkg/ovn/base_network_controller.go b/go-controller/pkg/ovn/base_network_controller.go index 7a43551f52..b8ac355cbf 100644 --- a/go-controller/pkg/ovn/base_network_controller.go +++ b/go-controller/pkg/ovn/base_network_controller.go @@ -1,13 +1,16 @@ package ovn import ( + "errors" "fmt" "net" + "reflect" "sync" "time" libovsdbclient "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/pod" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" @@ -17,6 +20,8 @@ import ( libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + nad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" lsm "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/logical_switch_manager" zoneic "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/zone_interconnect" @@ -24,9 +29,11 @@ import ( ovnretry "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/syncmap" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" kapi "k8s.io/api/core/v1" + knet "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/util/sets" @@ -36,8 +43,6 @@ import ( ref "k8s.io/client-go/tools/reference" "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - - nadlister "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" ) // CommonNetworkControllerInfo structure is place holder for all fields shared among controllers. @@ -89,6 +94,8 @@ type BaseNetworkController struct { retryNamespaces *ovnretry.RetryFramework // retry framework for network policies retryNetworkPolicies *ovnretry.RetryFramework + // retry framework for network policies + retryMultiNetworkPolicies *ovnretry.RetryFramework // retry framework for IPAMClaims retryIPAMClaims *ovnretry.RetryFramework @@ -110,7 +117,7 @@ type BaseNetworkController struct { ipamClaimsReconciler *persistentips.IPAMClaimReconciler // A cache of all logical ports known to the controller - logicalPortCache *portCache + logicalPortCache *PortCache // Info about known namespaces. You must use oc.getNamespaceLocked() or // oc.waitForNamespaceLocked() to read this map, and oc.createNamespaceLocked() @@ -158,6 +165,8 @@ type BaseNetworkController struct { // to the cluster router. Please see zone_interconnect/interconnect_handler.go for more details. zoneICHandler *zoneic.ZoneInterconnectHandler + // nadController used for getting network information for UDNs + nadController nad.NADController // releasedPodsBeforeStartup tracks pods per NAD (map of NADs to pods UIDs) // might have been already be released on startup releasedPodsBeforeStartup map[string]sets.Set[string] @@ -166,6 +175,8 @@ type BaseNetworkController struct { // IP addresses of OVN Cluster logical router port ("GwRouterToJoinSwitchPrefix + OVNClusterRouter") // connecting to the join switch ovnClusterLRPToJoinIfAddrs []*net.IPNet + + observManager *observability.Manager } // BaseSecondaryNetworkController structure holds per-network fields and network specific @@ -175,8 +186,10 @@ type BaseSecondaryNetworkController struct { networkID *int + // network policy events factory handler + netPolicyHandler *factory.Handler // multi-network policy events factory handler - policyHandler *factory.Handler + multiNetPolicyHandler *factory.Handler } func getNetworkControllerName(netName string) string { @@ -303,6 +316,38 @@ func (bnc *BaseNetworkController) syncNodeClusterRouterPort(node *kapi.Node, hos return err } + if util.IsNetworkSegmentationSupportEnabled() && + bnc.IsPrimaryNetwork() && !config.OVNKubernetesFeature.EnableInterconnect && + bnc.TopologyType() == types.Layer3Topology { + // since in nonIC the ovn_cluster_router is distributed, we must specify the gatewayPort for the + // conditional SNATs to signal OVN which gatewayport should be chosen if there are mutiple distributed + // gateway ports. Now that the LRP is created, let's update the NATs to reflect that. 
+ lrp := nbdb.LogicalRouterPort{ + Name: lrpName, + } + logicalRouterPort, err := libovsdbops.GetLogicalRouterPort(bnc.nbClient, &lrp) + if err != nil { + return fmt.Errorf("failed to fetch gateway port %s for network %q on node %q, err: %w", + lrpName, bnc.GetNetworkName(), node.Name, err) + } + gatewayPort := logicalRouterPort.UUID + p := func(item *nbdb.NAT) bool { + return item.ExternalIDs[types.NetworkExternalID] == bnc.GetNetworkName() && + item.LogicalPort != nil && *item.LogicalPort == lrpName && item.Match != "" + } + nonICConditionalSNATs, err := libovsdbops.FindNATsWithPredicate(bnc.nbClient, p) + if err != nil { + return fmt.Errorf("failed to fetch conditional NATs %s for network %q on node %q, err: %w", + lrpName, bnc.GetNetworkName(), node.Name, err) + } + for _, nat := range nonICConditionalSNATs { + nat.GatewayPort = &gatewayPort + } + if err := libovsdbops.CreateOrUpdateNATs(bnc.nbClient, &logicalRouter, nonICConditionalSNATs...); err != nil { + return fmt.Errorf("failed to update conditional NATs %s for network %q on node %q, err: %w", + lrpName, bnc.GetNetworkName(), node.Name, err) + } + } return nil } @@ -322,13 +367,8 @@ func (bnc *BaseNetworkController) createNodeLogicalSwitch(nodeName string, hostS logicalSwitch := nbdb.LogicalSwitch{ Name: switchName, } - if bnc.IsSecondary() { - logicalSwitch.ExternalIDs = map[string]string{ - types.NetworkExternalID: bnc.GetNetworkName(), - types.TopologyExternalID: bnc.TopologyType(), - } - } + logicalSwitch.ExternalIDs = util.GenerateExternalIDsForSwitchOrRouter(bnc.NetInfo) var v4Gateway, v6Gateway net.IP logicalSwitch.OtherConfig = map[string]string{} for _, hostSubnet := range hostSubnets { @@ -389,9 +429,11 @@ func (bnc *BaseNetworkController) createNodeLogicalSwitch(nodeName string, hostS Addresses: []string{"router"}, Options: map[string]string{ "router-port": types.RouterToSwitchPrefix + switchName, - "arp_proxy": kubevirt.ComposeARPProxyLSPOption(), }, } + if bnc.IsDefault() { + logicalSwitchPort.Options["arp_proxy"] = kubevirt.ComposeARPProxyLSPOption() + } sw := nbdb.LogicalSwitch{Name: switchName} err = libovsdbops.CreateOrUpdateLogicalSwitchPortsOnSwitch(bnc.nbClient, &sw, &logicalSwitchPort) if err != nil { @@ -584,10 +626,24 @@ func (bnc *BaseNetworkController) deleteNamespaceLocked(ns string) (*namespaceIn return nsInfo, nil } -func (bnc *BaseNetworkController) syncNodeManagementPort(node *kapi.Node, switchName string, hostSubnets []*net.IPNet, routeHostSubnets bool) ([]net.IP, error) { - macAddress, err := util.ParseNodeManagementPortMACAddresses(node, bnc.GetNetworkName()) - if err != nil { - return nil, err +func (bnc *BaseNetworkController) syncNodeManagementPort(node *kapi.Node, switchName, routerName string, hostSubnets []*net.IPNet) ([]net.IP, error) { + // get the MAC address from the node annotation only for legacy reasons; if it doesn't exist, calculate it from the subnets + var macAddress net.HardwareAddr + var err error + // find suitable MAC address + + if bnc.IsDefault() { + // check node annotation first for default network, to ensure we are not picking a new MAC when one was already configured + if macAddress, err = util.ParseNodeManagementPortMACAddresses(node, bnc.GetNetworkName()); err != nil && !util.IsAnnotationNotSetError(err) { + return nil, err + } + } + if len(macAddress) == 0 { + // calculate mac + if len(hostSubnets) == 0 { + return nil, fmt.Errorf("unable to generate MAC address, no subnets provided for network: %s", bnc.GetNetworkName()) + } + macAddress =
util.IPAddrToHWAddr(util.GetNodeManagementIfAddr(hostSubnets[0]).IP) } var v4Subnet *net.IPNet @@ -605,19 +661,25 @@ func (bnc *BaseNetworkController) syncNodeManagementPort(node *kapi.Node, switch if !utilnet.IsIPv6CIDR(hostSubnet) { v4Subnet = hostSubnet } - if config.Gateway.Mode == config.GatewayModeLocal && routeHostSubnets { + if config.Gateway.Mode == config.GatewayModeLocal { lrsr := nbdb.LogicalRouterStaticRoute{ Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, IPPrefix: hostSubnet.String(), Nexthop: mgmtIfAddr.IP.String(), } + if bnc.IsSecondary() { + lrsr.ExternalIDs = map[string]string{ + ovntypes.NetworkExternalID: bnc.GetNetworkName(), + ovntypes.TopologyExternalID: bnc.TopologyType(), + } + } p := func(item *nbdb.LogicalRouterStaticRoute) bool { return item.IPPrefix == lrsr.IPPrefix && libovsdbops.PolicyEqualPredicate(lrsr.Policy, item.Policy) } - err := libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(bnc.nbClient, bnc.GetNetworkScopedClusterRouterName(), + err := libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(bnc.nbClient, routerName, &lrsr, p, &lrsr.Nexthop) if err != nil { - return nil, fmt.Errorf("error creating static route %+v on router %s: %v", lrsr, bnc.GetNetworkScopedClusterRouterName(), err) + return nil, fmt.Errorf("error creating static route %+v on router %s: %v", lrsr, routerName, err) } } } @@ -633,11 +695,12 @@ func (bnc *BaseNetworkController) syncNodeManagementPort(node *kapi.Node, switch return nil, err } - // TODO(dceara): The cluster port group must be per network. - err = libovsdbops.AddPortsToPortGroup(bnc.nbClient, bnc.getClusterPortGroupName(types.ClusterPortGroupNameBase), logicalSwitchPort.UUID) - if err != nil { - klog.Errorf(err.Error()) - return nil, err + clusterPortGroupName := bnc.getClusterPortGroupName(types.ClusterPortGroupNameBase) + if err = libovsdbops.AddPortsToPortGroup(bnc.nbClient, clusterPortGroupName, logicalSwitchPort.UUID); err != nil { + err1 := fmt.Errorf("failed to add port %s to cluster port group %s (%s): %w", + logicalSwitchPort.Name, types.ClusterPortGroupNameBase, clusterPortGroupName, err) + klog.Error(err1) + return nil, err1 } if v4Subnet != nil { @@ -649,12 +712,24 @@ func (bnc *BaseNetworkController) syncNodeManagementPort(node *kapi.Node, switch return mgmtPortIPs, nil } -func (bnc *BaseNetworkController) syncNodeManagementPortRouteHostSubnets(node *kapi.Node, switchName string, hostSubnets []*net.IPNet) ([]net.IP, error) { - return bnc.syncNodeManagementPort(node, switchName, hostSubnets, true) -} +// addLocalPodToNamespaceLocked returns the ops needed to add the pod's IP to the namespace +// address set and the port UUID (if applicable) to the namespace port group. +// This function must be called with the nsInfo lock taken. 
+func (bnc *BaseNetworkController) addLocalPodToNamespaceLocked(nsInfo *namespaceInfo, ips []*net.IPNet, portUUID string) ([]ovsdb.Operation, error) { + var ops []ovsdb.Operation + var err error + + if ops, err = nsInfo.addressSet.AddAddressesReturnOps(util.IPNetsIPToStringSlice(ips)); err != nil { + return nil, err + } -func (bnc *BaseNetworkController) syncNodeManagementPortNoRouteHostSubnets(node *kapi.Node, switchName string, hostSubnets []*net.IPNet) ([]net.IP, error) { - return bnc.syncNodeManagementPort(node, switchName, hostSubnets, false) + if portUUID != "" && nsInfo.portGroupName != "" { + if ops, err = libovsdbops.AddPortsToPortGroupOps(bnc.nbClient, ops, nsInfo.portGroupName, portUUID); err != nil { + return nil, err + } + } + + return ops, nil } // WatchNodes starts the watching of the nodes resource and calls back the appropriate handler logic @@ -760,14 +835,9 @@ func (bnc *BaseNetworkController) isLocalZoneNode(node *kapi.Node) bool { return util.GetNodeZone(node) == bnc.zone } -// getActiveNetworkForNamespace returns the active network for the given namespace -// and is a wrapper around util.GetActiveNetworkForNamespace +// getActiveNetworkForNamespace is a wrapper around nadController.GetActiveNetworkForNamespace func (bnc *BaseNetworkController) getActiveNetworkForNamespace(namespace string) (util.NetInfo, error) { - var nadLister nadlister.NetworkAttachmentDefinitionLister - if util.IsNetworkSegmentationSupportEnabled() { - nadLister = bnc.watchFactory.NADInformer().Lister() - } - return util.GetActiveNetworkForNamespace(namespace, nadLister) + return bnc.nadController.GetActiveNetworkForNamespace(namespace) } // GetNetworkRole returns the role of this controller's @@ -805,7 +875,7 @@ func (bnc *BaseNetworkController) GetNetworkRole(pod *kapi.Pod) (string, error) } activeNetwork, err := bnc.getActiveNetworkForNamespace(pod.Namespace) if err != nil { - if util.IsUnknownActiveNetworkError(err) { + if util.IsUnprocessedActiveNetworkError(err) { bnc.recordPodErrorEvent(pod, err) } return "", err @@ -826,7 +896,7 @@ func (bnc *BaseNetworkController) isLayer2Interconnect() bool { return config.OVNKubernetesFeature.EnableInterconnect && bnc.NetInfo.TopologyType() == types.Layer2Topology } -func (bnc *BaseNetworkController) nodeZoneClusterChanged(oldNode, newNode *kapi.Node, newNodeIsLocalZone bool) bool { +func (bnc *BaseNetworkController) nodeZoneClusterChanged(oldNode, newNode *kapi.Node, newNodeIsLocalZone bool, netName string) bool { // Check if the annotations have changed. Use network topology and local params to skip unnecessary checks // NodeIDAnnotationChanged and NodeTransitSwitchPortAddrAnnotationChanged affect local and remote nodes @@ -839,7 +909,7 @@ func (bnc *BaseNetworkController) nodeZoneClusterChanged(oldNode, newNode *kapi. } // NodeGatewayRouterLRPAddrsAnnotationChanged would not affect local, nor localnet secondary network - if !newNodeIsLocalZone && bnc.NetInfo.TopologyType() != types.LocalnetTopology && util.NodeGatewayRouterLRPAddrsAnnotationChanged(oldNode, newNode) { + if !newNodeIsLocalZone && bnc.NetInfo.TopologyType() != types.LocalnetTopology && joinCIDRChanged(oldNode, newNode, netName) { return true } @@ -899,3 +969,137 @@ func (bnc *BaseNetworkController) findMigratablePodIPsForSubnets(subnets []*net.
} return ipList, nil } + +func (bnc *BaseNetworkController) AddResourceCommon(objType reflect.Type, obj interface{}) error { + switch objType { + case factory.PolicyType: + np, ok := obj.(*knet.NetworkPolicy) + if !ok { + return fmt.Errorf("could not cast %T object to *knet.NetworkPolicy", obj) + } + netinfo, err := bnc.getActiveNetworkForNamespace(np.Namespace) + if err != nil { + return fmt.Errorf("could not get active network for namespace %s: %v", np.Namespace, err) + } + if bnc.GetNetworkName() != netinfo.GetNetworkName() { + return nil + } + if err := bnc.addNetworkPolicy(np); err != nil { + klog.Infof("Network Policy add failed for %s/%s, will try again later: %v", + np.Namespace, np.Name, err) + return err + } + default: + klog.Errorf("Cannot process add resource event, object type %s is not supported", objType) + } + return nil +} + +func (bnc *BaseNetworkController) DeleteResourceCommon(objType reflect.Type, obj interface{}) error { + switch objType { + case factory.PolicyType: + knp, ok := obj.(*knet.NetworkPolicy) + if !ok { + return fmt.Errorf("could not cast obj of type %T to *knet.NetworkPolicy", obj) + } + netinfo, err := bnc.getActiveNetworkForNamespace(knp.Namespace) + if err != nil { + return fmt.Errorf("could not get active network for namespace %s: %v", knp.Namespace, err) + } + if bnc.GetNetworkName() != netinfo.GetNetworkName() { + return nil + } + return bnc.deleteNetworkPolicy(knp) + default: + klog.Errorf("Cannot process delete resource event, object type %s is not supported", objType) + } + return nil +} + +func initLoadBalancerGroups(nbClient libovsdbclient.Client, netInfo util.NetInfo) ( + clusterLoadBalancerGroupUUID, switchLoadBalancerGroupUUID, routerLoadBalancerGroupUUID string, err error) { + + loadBalancerGroupName := netInfo.GetNetworkScopedLoadBalancerGroupName(ovntypes.ClusterLBGroupName) + clusterLBGroup := nbdb.LoadBalancerGroup{Name: loadBalancerGroupName} + ops, err := libovsdbops.CreateOrUpdateLoadBalancerGroupOps(nbClient, nil, &clusterLBGroup) + if err != nil { + klog.Errorf("Error creating operation for cluster-wide load balancer group %s: %v", loadBalancerGroupName, err) + return + } + + loadBalancerGroupName = netInfo.GetNetworkScopedLoadBalancerGroupName(ovntypes.ClusterSwitchLBGroupName) + clusterSwitchLBGroup := nbdb.LoadBalancerGroup{Name: loadBalancerGroupName} + ops, err = libovsdbops.CreateOrUpdateLoadBalancerGroupOps(nbClient, ops, &clusterSwitchLBGroup) + if err != nil { + klog.Errorf("Error creating operation for cluster-wide switch load balancer group %s: %v", loadBalancerGroupName, err) + return + } + + loadBalancerGroupName = netInfo.GetNetworkScopedLoadBalancerGroupName(ovntypes.ClusterRouterLBGroupName) + clusterRouterLBGroup := nbdb.LoadBalancerGroup{Name: loadBalancerGroupName} + ops, err = libovsdbops.CreateOrUpdateLoadBalancerGroupOps(nbClient, ops, &clusterRouterLBGroup) + if err != nil { + klog.Errorf("Error creating operation for cluster-wide router load balancer group %s: %v", loadBalancerGroupName, err) + return + } + + lbs := []*nbdb.LoadBalancerGroup{&clusterLBGroup, &clusterSwitchLBGroup, &clusterRouterLBGroup} + if _, err = libovsdbops.TransactAndCheckAndSetUUIDs(nbClient, lbs, ops); err != nil { + klog.Errorf("Error creating cluster-wide load balancer groups: %v", err) + return + } + + clusterLoadBalancerGroupUUID = clusterLBGroup.UUID + switchLoadBalancerGroupUUID = clusterSwitchLBGroup.UUID + routerLoadBalancerGroupUUID = clusterRouterLBGroup.UUID + + return +} + +func (bnc
*BaseNetworkController) setupClusterPortGroups() error { + pgIDs := bnc.getClusterPortGroupDbIDs(types.ClusterPortGroupNameBase) + pg := &nbdb.PortGroup{ + Name: libovsdbutil.GetPortGroupName(pgIDs), + } + pg, err := libovsdbops.GetPortGroup(bnc.nbClient, pg) + if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { + return fmt.Errorf("failed to query cluster port group for network %s: %w", bnc.GetNetworkName(), err) + } + if pg == nil { + // we didn't find an existing clusterPG, let's create a new empty PG (fresh cluster install) + // Create a cluster-wide port group that all logical switch ports are part of + pg := libovsdbutil.BuildPortGroup(pgIDs, nil, nil) + err = libovsdbops.CreateOrUpdatePortGroups(bnc.nbClient, pg) + if err != nil { + return fmt.Errorf("failed to create cluster port group for network %s: %w", bnc.GetNetworkName(), err) + } + } + + pgIDs = bnc.getClusterPortGroupDbIDs(types.ClusterRtrPortGroupNameBase) + pg = &nbdb.PortGroup{ + Name: libovsdbutil.GetPortGroupName(pgIDs), + } + pg, err = libovsdbops.GetPortGroup(bnc.nbClient, pg) + if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { + return fmt.Errorf("failed to query cluster router port group for network %s: %w", bnc.GetNetworkName(), err) + } + if pg == nil { + // we didn't find an existing clusterRtrPG, let's create a new empty PG (fresh cluster install) + // Create a cluster-wide port group with all node-to-cluster router + // logical switch ports. Currently the only user is multicast but it might + // be used for other features in the future. + pg = libovsdbutil.BuildPortGroup(pgIDs, nil, nil) + err = libovsdbops.CreateOrUpdatePortGroups(bnc.nbClient, pg) + if err != nil { + return fmt.Errorf("failed to create cluster router port group for network %s: %w", bnc.GetNetworkName(), err) + } + } + return nil +} + +func (bnc *BaseNetworkController) GetSamplingConfig() *libovsdbops.SamplingConfig { + if bnc.observManager != nil { + return bnc.observManager.SamplingConfig() + } + return nil +} diff --git a/go-controller/pkg/ovn/base_network_controller_multicast.go b/go-controller/pkg/ovn/base_network_controller_multicast.go index 74d2326732..1400de46e7 100644 --- a/go-controller/pkg/ovn/base_network_controller_multicast.go +++ b/go-controller/pkg/ovn/base_network_controller_multicast.go @@ -128,7 +128,7 @@ func (bnc *BaseNetworkController) createMulticastAllowPolicy(ns string, nsInfo * ingressACL := libovsdbutil.BuildACL(dbIDs, types.DefaultMcastAllowPriority, ingressMatch, nbdb.ACLActionAllow, nil, aclPipeline) acls := []*nbdb.ACL{egressACL, ingressACL} - ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, acls...) + ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, bnc.GetSamplingConfig(), acls...) if err != nil { return err } @@ -189,7 +189,7 @@ func (bnc *BaseNetworkController) createDefaultDenyMulticastPolicy() error { acl := libovsdbutil.BuildACL(dbIDs, types.DefaultMcastDenyPriority, match, nbdb.ACLActionDrop, nil, aclPipeline) acls = append(acls, acl) } - ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, acls...) + ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, bnc.GetSamplingConfig(), acls...) if err != nil { return err } @@ -232,7 +232,7 @@ func (bnc *BaseNetworkController) createDefaultAllowMulticastPolicy() error { acls = append(acls, acl) } - ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, acls...) + ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, bnc.GetSamplingConfig(), acls...) 
if err != nil { return err } @@ -322,3 +322,28 @@ func (bnc *BaseNetworkController) syncNsMulticast(k8sNamespaces map[string]bool) return nil } + +func (bnc *BaseNetworkController) syncDefaultMulticastPolicies() error { + // If supported, enable IGMP relay on the router to forward multicast + // traffic between nodes. + if bnc.multicastSupport { + // Drop IP multicast globally. Multicast is allowed only if explicitly + // enabled in a namespace. + if err := bnc.createDefaultDenyMulticastPolicy(); err != nil { + klog.Errorf("Failed to create default deny multicast policy, error: %v", err) + return err + } + + // Allow IP multicast from node switch to cluster router and from + // cluster router to node switch. + if err := bnc.createDefaultAllowMulticastPolicy(); err != nil { + klog.Errorf("Failed to create default allow multicast policy, error: %v", err) + return err + } + } else { + if err := bnc.disableMulticast(); err != nil { + return fmt.Errorf("failed to delete default multicast policy, error: %v", err) + } + } + return nil +} diff --git a/go-controller/pkg/ovn/base_network_controller_multipolicy_test.go b/go-controller/pkg/ovn/base_network_controller_multipolicy_test.go index 971b30f88e..025826019f 100644 --- a/go-controller/pkg/ovn/base_network_controller_multipolicy_test.go +++ b/go-controller/pkg/ovn/base_network_controller_multipolicy_test.go @@ -1,7 +1,7 @@ package ovn import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" netv1 "k8s.io/api/networking/v1" diff --git a/go-controller/pkg/ovn/base_network_controller_namespace.go b/go-controller/pkg/ovn/base_network_controller_namespace.go index 8f7849b785..41b66b1729 100644 --- a/go-controller/pkg/ovn/base_network_controller_namespace.go +++ b/go-controller/pkg/ovn/base_network_controller_namespace.go @@ -71,12 +71,16 @@ func getNamespaceAddrSetDbIDs(namespaceName, controller string) *libovsdbops.DbO // WatchNamespaces starts the watching of namespace resource and calls // back the appropriate handler logic func (bnc *BaseNetworkController) WatchNamespaces() error { - if bnc.IsSecondary() { + if bnc.IsPrimaryNetwork() && !util.IsNetworkSegmentationSupportEnabled() { + // For primary user defined networks, we don't have to watch namespace events if + // network segmentation support is not enabled. + return nil + } + + if bnc.IsSecondary() && !util.IsMultiNetworkPoliciesSupportEnabled() { // For secondary networks, we don't have to watch namespace events if // multi-network policy support is not enabled.
- if !util.IsMultiNetworkPoliciesSupportEnabled() { - return nil - } + return nil } if bnc.namespaceHandler != nil { @@ -84,9 +88,10 @@ func (bnc *BaseNetworkController) WatchNamespaces() error { } handler, err := bnc.retryNamespaces.WatchResource() - if err == nil { - bnc.namespaceHandler = handler + if err != nil { + return err } + bnc.namespaceHandler = handler return err } diff --git a/go-controller/pkg/ovn/base_network_controller_pods.go b/go-controller/pkg/ovn/base_network_controller_pods.go index d9261b5c84..c00d2d2061 100644 --- a/go-controller/pkg/ovn/base_network_controller_pods.go +++ b/go-controller/pkg/ovn/base_network_controller_pods.go @@ -444,7 +444,7 @@ func (bnc *BaseNetworkController) ensurePodAnnotation(pod *kapi.Pod, nadName str } func (bnc *BaseNetworkController) addLogicalPortToNetwork(pod *kapi.Pod, nadName string, - network *nadapi.NetworkSelectionElement) (ops []ovsdb.Operation, + network *nadapi.NetworkSelectionElement, enable *bool) (ops []ovsdb.Operation, lsp *nbdb.LogicalSwitchPort, podAnnotation *util.PodAnnotation, newlyCreatedPort bool, err error) { var ls *nbdb.LogicalSwitch @@ -511,6 +511,8 @@ func (bnc *BaseNetworkController) addLogicalPortToNetwork(pod *kapi.Pod, nadName } } + var customFields []libovsdbops.ModelUpdateField + lsp.Options = make(map[string]string) // Unique identifier to distinguish interfaces for recreated pods, also set by ovnkube-node // ovn-controller will claim the OVS interface only if external_ids:iface-id @@ -530,7 +532,10 @@ func (bnc *BaseNetworkController) addLogicalPortToNetwork(pod *kapi.Pod, nadName // chassis if ovnkube-node isn't running correctly and hasn't cleared // out iface-id for an old instance of this pod, and the pod got // rescheduled. - lsp.Options["requested-chassis"] = pod.Spec.NodeName + + if !config.Kubernetes.DisableRequestedChassis { + lsp.Options["requested-chassis"] = pod.Spec.NodeName + } // let's calculate this network controller's role for this pod // and pass that information while determining the podAnnotations @@ -564,6 +569,7 @@ func (bnc *BaseNetworkController) addLogicalPortToNetwork(pod *kapi.Pod, nadName } lsp.Addresses = addresses + customFields = append(customFields, libovsdbops.LogicalSwitchPortAddresses) // add external ids lsp.ExternalIDs = map[string]string{"namespace": pod.Namespace, "pod": "true"} @@ -575,6 +581,7 @@ func (bnc *BaseNetworkController) addLogicalPortToNetwork(pod *kapi.Pod, nadName // CNI depends on the flows from port security, delay setting it until end lsp.PortSecurity = addresses + customFields = append(customFields, libovsdbops.LogicalSwitchPortPortSecurity) // On layer2 topology with interconnect, we need to add specific port config if bnc.isLayer2Interconnect() { @@ -583,9 +590,19 @@ func (bnc *BaseNetworkController) addLogicalPortToNetwork(pod *kapi.Pod, nadName if err != nil { return nil, nil, nil, false, err } + if isRemotePort { + customFields = append(customFields, libovsdbops.LogicalSwitchPortType) + } + } + if len(lsp.Options) != 0 { + customFields = append(customFields, libovsdbops.LogicalSwitchPortOptions) } - ops, err = libovsdbops.CreateOrUpdateLogicalSwitchPortsOnSwitchOps(bnc.nbClient, nil, ls, lsp) + lsp.Enabled = enable + if lsp.Enabled != nil { + customFields = append(customFields, libovsdbops.LogicalSwitchPortEnabled) + } + ops, err = libovsdbops.CreateOrUpdateLogicalSwitchPortsOnSwitchWithCustomFieldsOps(bnc.nbClient, nil, ls, customFields, lsp) if err != nil { return nil, nil, nil, false, fmt.Errorf("error creating logical switch port %+v on
switch %+v: %+v", *lsp, *ls, err) diff --git a/go-controller/pkg/ovn/base_network_controller_policy.go b/go-controller/pkg/ovn/base_network_controller_policy.go index f0f59577dd..73e18e87e3 100644 --- a/go-controller/pkg/ovn/base_network_controller_policy.go +++ b/go-controller/pkg/ovn/base_network_controller_policy.go @@ -200,6 +200,77 @@ func NewNetworkPolicy(policy *knet.NetworkPolicy) *networkPolicy { return np } +func (bnc *BaseNetworkController) syncNetworkPolicies(networkPolicies []interface{}) error { + expectedPolicies := make(map[string]map[string]bool) + for _, npInterface := range networkPolicies { + policy, ok := npInterface.(*knet.NetworkPolicy) + if !ok { + return fmt.Errorf("spurious object in syncNetworkPolicies: %v", npInterface) + } + if nsMap, ok := expectedPolicies[policy.Namespace]; ok { + nsMap[policy.Name] = true + } else { + expectedPolicies[policy.Namespace] = map[string]bool{ + policy.Name: true, + } + } + } + err := bnc.syncNetworkPoliciesCommon(expectedPolicies) + if err != nil { + return err + } + + // add default hairpin allow acl + err = bnc.addHairpinAllowACL() + if err != nil { + return fmt.Errorf("failed to create allow hairpin acl: %w", err) + } + + return nil +} + +func (bnc *BaseNetworkController) addHairpinAllowACL() error { + var v4Match, v6Match, match string + + if config.IPv4Mode { + v4Match = fmt.Sprintf("%s.src == %s", "ip4", config.Gateway.MasqueradeIPs.V4OVNServiceHairpinMasqueradeIP.String()) + match = v4Match + } + if config.IPv6Mode { + v6Match = fmt.Sprintf("%s.src == %s", "ip6", config.Gateway.MasqueradeIPs.V6OVNServiceHairpinMasqueradeIP.String()) + match = v6Match + } + if config.IPv4Mode && config.IPv6Mode { + match = fmt.Sprintf("(%s || %s)", v4Match, v6Match) + } + + ingressACLIDs := bnc.getNetpolDefaultACLDbIDs(string(knet.PolicyTypeIngress)) + ingressACL := libovsdbutil.BuildACL(ingressACLIDs, types.DefaultAllowPriority, match, + nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportIngress) + + egressACLIDs := bnc.getNetpolDefaultACLDbIDs(string(knet.PolicyTypeEgress)) + egressACL := libovsdbutil.BuildACL(egressACLIDs, types.DefaultAllowPriority, match, + nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportEgressAfterLB) + + ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, nil, ingressACL, egressACL) + if err != nil { + return fmt.Errorf("failed to create or update hairpin allow ACL %v", err) + } + + ops, err = libovsdbops.AddACLsToPortGroupOps(bnc.nbClient, ops, bnc.getClusterPortGroupName(types.ClusterPortGroupNameBase), + ingressACL, egressACL) + if err != nil { + return fmt.Errorf("failed to add ACL hairpin allow acl to port group: %v", err) + } + + _, err = libovsdbops.TransactAndCheck(bnc.nbClient, ops) + if err != nil { + return err + } + + return nil +} + // syncNetworkPoliciesCommon syncs logical entities associated with existing network policies. 
// It serves both networkpolicies (for default network) and multi-networkpolicies (for secondary networks) func (bnc *BaseNetworkController) syncNetworkPoliciesCommon(expectedPolicies map[string]map[string]bool) error { @@ -260,7 +331,7 @@ func (bnc *BaseNetworkController) addAllowACLFromNode(switchName string, mgmtPor nodeACL := libovsdbutil.BuildACL(dbIDs, types.DefaultAllowPriority, match, nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportIngress) - ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, nodeACL) + ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, bnc.GetSamplingConfig(), nodeACL) if err != nil { return fmt.Errorf("failed to create or update ACL %v: %v", nodeACL, err) } @@ -365,7 +436,7 @@ func (bnc *BaseNetworkController) createDefaultDenyPGAndACLs(namespace, policy s egressPGIDs := bnc.getDefaultDenyPolicyPortGroupIDs(namespace, libovsdbutil.ACLEgress) egressPGName := libovsdbutil.GetPortGroupName(egressPGIDs) egressDenyACL, egressAllowACL := bnc.buildDenyACLs(namespace, egressPGName, aclLogging, libovsdbutil.ACLEgress) - ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, ingressDenyACL, ingressAllowACL, egressDenyACL, egressAllowACL) + ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, bnc.GetSamplingConfig(), ingressDenyACL, ingressAllowACL, egressDenyACL, egressAllowACL) if err != nil { return err } @@ -560,7 +631,7 @@ func (bnc *BaseNetworkController) getNewLocalPolicyPorts(np *networkPolicy, // getExistingLocalPolicyPorts will find and return port info for every given pod obj, that is present in np.localPods. func (bnc *BaseNetworkController) getExistingLocalPolicyPorts(np *networkPolicy, - objs ...interface{}) (policyPortsToUUIDs map[string]string, policyPortUUIDs []string) { + objs ...interface{}) (policyPortsToUUIDs map[string]string, policyPortUUIDs []string, err error) { klog.Infof("Processing NetworkPolicy %s/%s to delete %d local pods...", np.namespace, np.name, len(objs)) policyPortUUIDs = []string{} @@ -767,7 +838,10 @@ func (bnc *BaseNetworkController) handleLocalPodSelectorDelFunc(np *networkPolic return nil } - portNamesToUUIDs, policyPortUUIDs := bnc.getExistingLocalPolicyPorts(np, objs...) + portNamesToUUIDs, policyPortUUIDs, err := bnc.getExistingLocalPolicyPorts(np, objs...) + if err != nil { + return err + } if len(portNamesToUUIDs) > 0 { var err error @@ -987,7 +1061,7 @@ func (bnc *BaseNetworkController) createNetworkPolicy(policy *knet.NetworkPolicy ops := []ovsdb.Operation{} acls := bnc.buildNetworkPolicyACLs(np, aclLogging) - ops, err = libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, ops, acls...) + ops, err = libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, ops, bnc.GetSamplingConfig(), acls...) if err != nil { return fmt.Errorf("failed to create ACL ops: %v", err) } @@ -1392,7 +1466,7 @@ func (bnc *BaseNetworkController) peerNamespaceUpdate(np *networkPolicy, gp *gre } // buildLocalPodACLs is safe for concurrent use, see function comment for details acls, deletedACLs := gp.buildLocalPodACLs(np.portGroupName, aclLogging) - ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, acls...) + ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, bnc.GetSamplingConfig(), acls...) 
if err != nil { return err } diff --git a/go-controller/pkg/ovn/base_network_controller_secondary.go b/go-controller/pkg/ovn/base_network_controller_secondary.go index 4a68d55daf..062c6ac618 100644 --- a/go-controller/pkg/ovn/base_network_controller_secondary.go +++ b/go-controller/pkg/ovn/base_network_controller_secondary.go @@ -5,6 +5,7 @@ import ( "fmt" "net" "reflect" + "strings" "time" ipamclaimsapi "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" @@ -13,8 +14,11 @@ import ( libovsdbclient "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/udn" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kubevirt" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" @@ -23,9 +27,11 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors" + corev1 "k8s.io/api/core/v1" kapi "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/klog/v2" + utilnet "k8s.io/utils/net" "k8s.io/utils/ptr" ) @@ -90,7 +96,7 @@ func (bsnc *BaseSecondaryNetworkController) AddSecondaryNetworkResourceCommon(ob return nil default: - return fmt.Errorf("object type %s not supported", objType) + return bsnc.AddResourceCommon(objType, obj) } return nil } @@ -212,7 +218,7 @@ func (bsnc *BaseSecondaryNetworkController) DeleteSecondaryNetworkResourceCommon klog.Infof("Released IPs %q for network %q", ipamClaim.Status.IPs, ipamClaim.Spec.Network) default: - return fmt.Errorf("object type %s not supported", objType) + return bsnc.DeleteResourceCommon(objType, obj) } return nil } @@ -226,12 +232,28 @@ func (bsnc *BaseSecondaryNetworkController) ensurePodForSecondaryNetwork(pod *ka return nil } - if util.PodWantsHostNetwork(pod) && !addPort { + if util.PodWantsHostNetwork(pod) { + return nil + } + + var kubevirtLiveMigrationStatus *kubevirt.LiveMigrationStatus + var err error + + if kubevirt.IsPodAllowedForMigration(pod, bsnc.NetInfo) { + kubevirtLiveMigrationStatus, err = kubevirt.DiscoverLiveMigrationStatus(bsnc.watchFactory, pod) + if err != nil { + return fmt.Errorf("failed to discover Live-migration status: %w", err) + } + } + updatePort := kubevirtLiveMigrationStatus != nil && pod.Name == kubevirtLiveMigrationStatus.TargetPod.Name + + if !addPort && !updatePort { return nil } // If a node does not have an assigned hostsubnet don't wait for the logical switch to appear - switchName, err := bsnc.getExpectedSwitchName(pod) + var switchName string + switchName, err = bsnc.getExpectedSwitchName(pod) if err != nil { return err } @@ -265,7 +287,7 @@ func (bsnc *BaseSecondaryNetworkController) ensurePodForSecondaryNetwork(pod *ka var errs []error for nadName, network := range networkMap { - if err = bsnc.addLogicalPortToNetworkForNAD(pod, nadName, switchName, network); err != nil { + if err = bsnc.addLogicalPortToNetworkForNAD(pod, nadName, switchName, network, kubevirtLiveMigrationStatus); err != nil { errs = append(errs, fmt.Errorf("failed to add logical port of Pod %s/%s for NAD %s: %w", pod.Namespace, pod.Name, nadName, err)) } } @@ -276,7 +298,7 @@ func (bsnc *BaseSecondaryNetworkController) ensurePodForSecondaryNetwork(pod *ka } func (bsnc 
*BaseSecondaryNetworkController) addLogicalPortToNetworkForNAD(pod *kapi.Pod, nadName, switchName string, - network *nadapi.NetworkSelectionElement) error { + network *nadapi.NetworkSelectionElement, kubevirtLiveMigrationStatus *kubevirt.LiveMigrationStatus) error { var libovsdbExecuteTime time.Duration start := time.Now() @@ -291,6 +313,15 @@ func (bsnc *BaseSecondaryNetworkController) addLogicalPortToNetworkForNAD(pod *k var lsp *nbdb.LogicalSwitchPort var newlyCreated bool + var lspEnabled *bool + // actions on the pods' LSP are only triggered from the target pod + shouldHandleLiveMigration := kubevirtLiveMigrationStatus != nil && pod.Name == kubevirtLiveMigrationStatus.TargetPod.Name + if shouldHandleLiveMigration { + // the LSP must be altered inside addLogicalPortToNetwork() before ops are generated, because multiple ops + // for the same object cannot be appended in the same transaction, so the enabled value is passed as a parameter. + lspEnabled = ptr.To(kubevirtLiveMigrationStatus.IsTargetDomainReady()) + } + // we need to create a logical port for all local pods // we also need to create a remote logical port for remote pods on layer2 // topologies with interconnect @@ -298,7 +329,7 @@ func (bsnc *BaseSecondaryNetworkController) addLogicalPortToNetworkForNAD(pod *k requiresLogicalPort := isLocalPod || bsnc.isLayer2Interconnect() if requiresLogicalPort { - ops, lsp, podAnnotation, newlyCreated, err = bsnc.addLogicalPortToNetwork(pod, nadName, network) + ops, lsp, podAnnotation, newlyCreated, err = bsnc.addLogicalPortToNetwork(pod, nadName, network, lspEnabled) if err != nil { return err } @@ -322,6 +353,14 @@ func (bsnc *BaseSecondaryNetworkController) addLogicalPortToNetworkForNAD(pod *k bsnc.logicalPortCache.remove(pod, nadName) } + if shouldHandleLiveMigration && + kubevirtLiveMigrationStatus.IsTargetDomainReady() { + ops, err = bsnc.disableLiveMigrationSourceLSPOps(kubevirtLiveMigrationStatus, nadName, ops) + if err != nil { + return fmt.Errorf("failed to create LSP ops for source pod during Live-migration status: %w", err) + } + } + if podAnnotation == nil { podAnnotation, err = util.UnmarshalPodAnnotation(pod.Annotations, nadName) if err != nil { @@ -329,15 +368,32 @@ func (bsnc *BaseSecondaryNetworkController) addLogicalPortToNetworkForNAD(pod *k } } - if bsnc.doesNetworkRequireIPAM() && util.IsMultiNetworkPoliciesSupportEnabled() { + if bsnc.doesNetworkRequireIPAM() && + (util.IsMultiNetworkPoliciesSupportEnabled() || (util.IsNetworkSegmentationSupportEnabled() && bsnc.IsPrimaryNetwork())) { // Ensure the namespace/nsInfo exists - addOps, err := bsnc.addPodToNamespaceForSecondaryNetwork(pod.Namespace, podAnnotation.IPs) + portUUID := "" + if lsp != nil { + portUUID = lsp.UUID + } + addOps, err := bsnc.addPodToNamespaceForSecondaryNetwork(pod.Namespace, podAnnotation.IPs, portUUID) if err != nil { return err } ops = append(ops, addOps...) } + if util.IsNetworkSegmentationSupportEnabled() && bsnc.IsPrimaryNetwork() && config.Gateway.DisableSNATMultipleGWs { + // we need to add per-pod SNATs for UDN networks + snatOps, err := bsnc.addPerPodSNATOps(pod, podAnnotation.IPs) + if err != nil { + return fmt.Errorf("failed to construct SNAT for pod %s/%s which is part of network %s, err: %v", + pod.Namespace, pod.Name, bsnc.GetNetworkName(), err) + } + if snatOps != nil { + ops = append(ops, snatOps...)
+ } + + recordOps, txOkCallBack, _, err := bsnc.AddConfigDurationRecord("pod", pod.Namespace, pod.Name) if err != nil { klog.Errorf("Config duration recorder: %v", err) } @@ -354,6 +410,11 @@ func (bsnc *BaseSecondaryNetworkController) addLogicalPortToNetworkForNAD(pod *k if lsp != nil { _ = bsnc.logicalPortCache.add(pod, switchName, nadName, lsp.UUID, podAnnotation.MAC, podAnnotation.IPs) + if bsnc.requireDHCP(pod) { + if err := bsnc.ensureDHCP(pod, podAnnotation, lsp); err != nil { + return err + } + } } if isLocalPod { @@ -366,6 +427,30 @@ func (bsnc *BaseSecondaryNetworkController) addLogicalPortToNetworkForNAD(pod *k return nil } +// addPerPodSNATOps returns the ops that will add the SNAT towards the masquerade IP for the given pod +func (bsnc *BaseSecondaryNetworkController) addPerPodSNATOps(pod *kapi.Pod, podIPs []*net.IPNet) ([]ovsdb.Operation, error) { + if !bsnc.isPodScheduledinLocalZone(pod) { + // nothing to do if it's a remote zone pod + return nil, nil + } + // we need to add per-pod SNATs for UDN networks + networkID, err := bsnc.getNetworkID() + if err != nil { + return nil, fmt.Errorf("failed to get networkID for network %q: %v", bsnc.GetNetworkName(), err) + } + masqIPs, err := udn.GetUDNGatewayMasqueradeIPs(networkID) + if err != nil { + return nil, fmt.Errorf("failed to get masquerade IPs, network %s (%d): %v", bsnc.GetNetworkName(), networkID, err) + } + + ops, err := addOrUpdatePodSNATOps(bsnc.nbClient, bsnc.GetNetworkScopedGWRouterName(pod.Spec.NodeName), masqIPs, podIPs, bsnc.NetInfo.GetNetworkScopedClusterSubnetSNATMatch(pod.Spec.NodeName), nil) + if err != nil { + return nil, fmt.Errorf("failed to construct SNAT ops for pod %s/%s which is part of network %s, err: %v", + pod.Namespace, pod.Name, bsnc.GetNetworkName(), err) + } + return ops, nil +} + // removePodForSecondaryNetwork tries to tear down a pod. It returns nil on success and error on failure; // failure indicates the pod tear down should be retried later. func (bsnc *BaseSecondaryNetworkController) removePodForSecondaryNetwork(pod *kapi.Pod, portInfoMap map[string]*lpInfo) error { @@ -374,27 +459,12 @@ func (bsnc *BaseSecondaryNetworkController) removePodForSecondaryNetwork(pod *ka } podDesc := pod.Namespace + "/" + pod.Name - klog.Infof("Deleting pod: %s for network %s", podDesc, bsnc.GetNetworkName()) // there is only a logical port for local pods or remote pods of layer2 // networks on interconnect, so only delete in these cases isLocalPod := bsnc.isPodScheduledinLocalZone(pod) hasLogicalPort := isLocalPod || bsnc.isLayer2Interconnect() - // otherwise just delete pod IPs from the namespace address set - if !hasLogicalPort { - if bsnc.doesNetworkRequireIPAM() && util.IsMultiNetworkPoliciesSupportEnabled() { - return bsnc.removeRemoteZonePodFromNamespaceAddressSet(pod) - } - - // except for localnet networks, continue the delete flow in case a node just - // became remote where we might still need to cleanup. On L3 networks - // the node switch is removed so there is no need to do this. - if bsnc.TopologyType() != types.LocalnetTopology { - return nil - } - } - // for a specific NAD that belongs to this network, the Pod's logical port might already be created half-way // without its lpInfo cache being created; we need to delete resources created for that NAD as well. // So, first get all nadNames from the pod annotation, but handle only NADs that belong to this network.
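For orientation before the next hunk: the per-pod SNAT that addPerPodSNATOps assembles above ultimately lands in the NB database as a NAT row on the network-scoped gateway router. The sketch below is illustrative only and not part of this change; it builds such a row using just the NAT fields this patch touches (Type, LogicalIP, ExternalIP, LogicalPort, Match, ExternalIDs), and every concrete value in it (port name, addresses, MAC) is a hypothetical placeholder rather than the exact output of addOrUpdatePodSNATOps.

package main

import (
	"fmt"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
)

func main() {
	// Hypothetical gateway router port for a UDN named "bluenet" on node1.
	logicalPort := "rtoj-GR_bluenet_node1"
	// SNAT the pod IP to the network's masquerade IP when traffic egresses via
	// that port. The Match is what makes the SNAT conditional; it is also why
	// non-IC deployments must pin GatewayPort, as the syncNodeClusterRouterPort
	// hunk earlier in this patch explains.
	snat := &nbdb.NAT{
		Type:        "snat",
		LogicalIP:   "100.128.1.4",  // pod IP (placeholder)
		ExternalIP:  "169.254.0.13", // per-network masquerade IP (placeholder)
		LogicalPort: &logicalPort,
		Match:       "eth.dst == 0a:58:64:80:01:01", // placeholder condition
		ExternalIDs: map[string]string{types.NetworkExternalID: "bluenet"},
	}
	fmt.Printf("conditional SNAT row: %+v\n", snat)
}

Rows shaped like this are then handed to libovsdbops.CreateOrUpdateNATs against the gateway router, as the surrounding code does.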
@@ -407,28 +477,53 @@ func (bsnc *BaseSecondaryNetworkController) removePodForSecondaryNetwork(pod *ka portInfoMap = map[string]*lpInfo{} } - activeNetwork, err := bsnc.getActiveNetworkForNamespace(pod.Namespace) - if err != nil { - return fmt.Errorf("failed looking for the active network at namespace '%s': %w", pod.Namespace, err) - } - + var alreadyProcessed bool for nadName := range podNetworks { if !bsnc.HasNAD(nadName) { continue } - _, networkMap, err := util.GetPodNADToNetworkMappingWithActiveNetwork(pod, bsnc.NetInfo, activeNetwork) - if err != nil { - bsnc.recordPodErrorEvent(pod, err) - return err + // pod has a network managed by this controller + klog.Infof("Deleting pod: %s for network %s, NAD: %s", podDesc, bsnc.GetNetworkName(), nadName) + + // handle remote pod cleanup, but only do this one time + if !hasLogicalPort && !alreadyProcessed { + if bsnc.doesNetworkRequireIPAM() && + // the address set is used for network policy only, so this applies either when multi-network + // policy is enabled, or when network segmentation is enabled and this is a primary UDN (regular netpol) + (util.IsMultiNetworkPoliciesSupportEnabled() || (util.IsNetworkSegmentationSupportEnabled() && bsnc.IsPrimaryNetwork())) { + return bsnc.removeRemoteZonePodFromNamespaceAddressSet(pod) + } + + // except for localnet networks, continue the delete flow in case a node just + // became remote where we might still need to cleanup. On L3 networks + // the node switch is removed so there is no need to do this. + if bsnc.TopologyType() != types.LocalnetTopology { + return nil + } + alreadyProcessed = true } + if kubevirt.IsPodAllowedForMigration(pod, bsnc.NetInfo) { + if err = bsnc.enableSourceLSPFailedLiveMigration(pod, nadName); err != nil { + return err + } + } bsnc.logicalPortCache.remove(pod, nadName) pInfo, err := bsnc.deletePodLogicalPort(pod, portInfoMap[nadName], nadName) if err != nil { return err } + // Cleanup the SNAT entries before checking whether this controller handled the IP allocation + if util.IsNetworkSegmentationSupportEnabled() && bsnc.IsPrimaryNetwork() && config.Gateway.DisableSNATMultipleGWs { + // we need to delete per-pod SNATs for UDN networks + if err := bsnc.delPerPodSNAT(pod, nadName); err != nil { + return fmt.Errorf("failed to delete SNAT for pod %s/%s which is part of network %s, err: %v", + pod.Namespace, pod.Name, bsnc.GetNetworkName(), err) + } + } + // do not release IP address if this controller does not handle IP allocation if !bsnc.allocatesPodAnnotation() { continue @@ -440,32 +535,18 @@ func (bsnc *BaseSecondaryNetworkController) removePodForSecondaryNetwork(pod *ka continue } - network := networkMap[nadName] - - hasPersistentIPs := bsnc.allowPersistentIPs() - hasIPAMClaim := network != nil && network.IPAMClaimReference != "" - if hasIPAMClaim && !hasPersistentIPs { - klog.Errorf( - "Pod %s/%s referencing an IPAMClaim on network %q which does not honor it", - pod.GetNamespace(), - pod.GetName(), - bsnc.NetInfo.GetNetworkName(), - ) - hasIPAMClaim = false - } - if hasIPAMClaim { - ipamClaim, err := bsnc.ipamClaimsReconciler.FindIPAMClaim(network.IPAMClaimReference, network.Namespace) - hasIPAMClaim = ipamClaim != nil && len(ipamClaim.Status.IPs) > 0 - if apierrors.IsNotFound(err) { - klog.Errorf("Failed to retrieve IPAMClaim %q but will release IPs: %v", network.IPAMClaimReference, err) - } else if err != nil { - return fmt.Errorf("failed to get IPAMClaim %s/%s: %w", network.Namespace, network.IPAMClaimReference, err) + // if we allow for persistent IPs, then we need to check if this pod has an IPAM Claim + if
bsnc.allowPersistentIPs() { + hasIPAMClaim, err := bsnc.hasIPAMClaim(pod, nadName) + if err != nil { + return fmt.Errorf("unable to determine if pod %s has an IPAM Claim: %w", podDesc, err) + } + // if there is an IPAM claim, don't release the pod IPs + if hasIPAMClaim { + continue + } } - if hasIPAMClaim { - continue - } // Releasing IPs needs to happen last so that we can deterministically know that, if delete failed, // the IP of the pod needs to be released. Otherwise we could have a completed pod failed to be removed // and we don't know if the IP was released or not, and subsequently could accidentally release the IP @@ -477,6 +558,91 @@ func (bsnc *BaseSecondaryNetworkController) removePodForSecondaryNetwork(pod *ka } bsnc.forgetPodReleasedBeforeStartup(string(pod.UID), nadName) + + } return nil } + +// hasIPAMClaim determines whether a pod's IPAM is being handled by an IPAMClaim CR. +// The pod passed in should already have been validated as attached to the network of nadName +func (bsnc *BaseSecondaryNetworkController) hasIPAMClaim(pod *kapi.Pod, nadNamespacedName string) (bool, error) { + if !bsnc.AllowsPersistentIPs() { + return false, nil + } + + var ipamClaimName string + var wasPersistentIPRequested bool + if bsnc.IsPrimaryNetwork() { + // for the primary network, the IPAM claim reference is on the annotation + ipamClaimName, wasPersistentIPRequested = pod.Annotations[util.OvnUDNIPAMClaimName] + } else { + // for secondary networks, the IPAM claim reference is on the network selection element + nadKeys := strings.Split(nadNamespacedName, "/") + if len(nadKeys) != 2 { + return false, fmt.Errorf("invalid NAD name %s", nadNamespacedName) + } + nadNamespace := nadKeys[0] + nadName := nadKeys[1] + allNetworks, err := util.GetK8sPodAllNetworkSelections(pod) + if err != nil { + return false, err + } + for _, network := range allNetworks { + if network.Namespace == nadNamespace && network.Name == nadName { + // found network selection element, check if it has IPAM + if len(network.IPAMClaimReference) > 0 { + ipamClaimName = network.IPAMClaimReference + wasPersistentIPRequested = true + } + break + } + } + } + + if !wasPersistentIPRequested || len(ipamClaimName) == 0 { + return false, nil + } + + ipamClaim, err := bsnc.ipamClaimsReconciler.FindIPAMClaim(ipamClaimName, pod.Namespace) + if apierrors.IsNotFound(err) { + klog.Errorf("IPAMClaim %q for namespace %q not found, will release IPs: %v", + ipamClaimName, pod.Namespace, err) + return false, nil + } else if err != nil { + return false, fmt.Errorf("failed to get IPAMClaim %s/%s: %w", pod.Namespace, ipamClaimName, err) + } + + hasIPAMClaim := ipamClaim != nil && len(ipamClaim.Status.IPs) > 0 + return hasIPAMClaim, nil +} + +// delPerPodSNAT will delete the SNAT towards the masquerade IP for the given pod +func (bsnc *BaseSecondaryNetworkController) delPerPodSNAT(pod *kapi.Pod, nadName string) error { + if !bsnc.isPodScheduledinLocalZone(pod) { + // nothing to do if it's a remote zone pod + return nil + } + // we need to delete per-pod SNATs for UDN networks + networkID, err := bsnc.getNetworkID() + if err != nil { + return fmt.Errorf("failed to get networkID for network %q: %v", bsnc.GetNetworkName(), err) + } + masqIPs, err := udn.GetUDNGatewayMasqueradeIPs(networkID) + if err != nil { + return fmt.Errorf("failed to get masquerade IPs, network %s (%d): %v", bsnc.GetNetworkName(), networkID, err) + } + podNetAnnotation, err := util.UnmarshalPodAnnotation(pod.Annotations, nadName) + if err != nil { + return fmt.Errorf("failed to fetch annotations for pod %s/%s in network
%s; err: %v", pod.Namespace, pod.Name, bsnc.GetNetworkName(), err) + } + ops, err := deletePodSNATOps(bsnc.nbClient, nil, bsnc.GetNetworkScopedGWRouterName(pod.Spec.NodeName), masqIPs, podNetAnnotation.IPs, bsnc.GetNetworkScopedClusterSubnetSNATMatch(pod.Spec.NodeName)) + if err != nil { + return fmt.Errorf("failed to construct SNAT pods for pod %s/%s which is part of network %s, err: %v", + pod.Namespace, pod.Name, bsnc.GetNetworkName(), err) + } + if _, err = libovsdbops.TransactAndCheck(bsnc.nbClient, ops); err != nil { + return fmt.Errorf("failed to delete SNAT rule for pod %s/%s in network %s on gateway router %s: %w", + pod.Namespace, pod.Name, bsnc.GetNetworkName(), bsnc.GetNetworkScopedGWRouterName(pod.Spec.NodeName), err) } return nil } @@ -549,8 +715,7 @@ func (bsnc *BaseSecondaryNetworkController) syncPodsForSecondaryNetwork(pods []i } // addPodToNamespaceForSecondaryNetwork returns the ops needed to add pod's IP to the namespace's address set. -func (bsnc *BaseSecondaryNetworkController) addPodToNamespaceForSecondaryNetwork(ns string, ips []*net.IPNet) ([]ovsdb.Operation, error) { - var ops []ovsdb.Operation +func (bsnc *BaseSecondaryNetworkController) addPodToNamespaceForSecondaryNetwork(ns string, ips []*net.IPNet, portUUID string) ([]ovsdb.Operation, error) { var err error nsInfo, nsUnlock, err := bsnc.ensureNamespaceLockedForSecondaryNetwork(ns, true, nil) if err != nil { @@ -559,11 +724,7 @@ func (bsnc *BaseSecondaryNetworkController) addPodToNamespaceForSecondaryNetwork defer nsUnlock() - if ops, err = nsInfo.addressSet.AddAddressesReturnOps(util.IPNetsIPToStringSlice(ips)); err != nil { - return nil, err - } - - return ops, nil + return bsnc.addLocalPodToNamespaceLocked(nsInfo, ips, portUUID) } // AddNamespaceForSecondaryNetwork creates corresponding addressset in ovn db for secondary network @@ -634,21 +795,32 @@ func (bsnc *BaseSecondaryNetworkController) deleteNamespace4SecondaryNetwork(ns return nil } -// WatchMultiNetworkPolicy starts the watching of multinetworkpolicy resource and calls +// WatchNetworkPolicy starts the watching of networkpolicy resource and calls // back the appropriate handler logic -func (bsnc *BaseSecondaryNetworkController) WatchMultiNetworkPolicy() error { - if !util.IsMultiNetworkPoliciesSupportEnabled() { +func (bsnc *BaseSecondaryNetworkController) WatchNetworkPolicy() error { + if bsnc.netPolicyHandler != nil { return nil } + handler, err := bsnc.retryNetworkPolicies.WatchResource() + if err != nil { + return err + } + bsnc.netPolicyHandler = handler + return nil +} - if bsnc.policyHandler != nil { +// WatchMultiNetworkPolicy starts the watching of multinetworkpolicy resource and calls +// back the appropriate handler logic +func (bsnc *BaseSecondaryNetworkController) WatchMultiNetworkPolicy() error { + if bsnc.multiNetPolicyHandler != nil { return nil } - handler, err := bsnc.retryNetworkPolicies.WatchResource() + handler, err := bsnc.retryMultiNetworkPolicies.WatchResource() if err != nil { - bsnc.policyHandler = handler + return err } - return err + bsnc.multiNetPolicyHandler = handler + return nil } // cleanupPolicyLogicalEntities cleans up all the port groups and address sets that belong to the given controller @@ -687,22 +859,149 @@ func (bsnc *BaseSecondaryNetworkController) WatchIPAMClaims() error { func (oc *BaseSecondaryNetworkController) allowPersistentIPs() bool { return config.OVNKubernetesFeature.EnablePersistentIPs && - oc.NetInfo.AllowsPersistentIPs() && util.DoesNetworkRequireIPAM(oc.NetInfo) && - 
(oc.NetInfo.TopologyType() == types.Layer2Topology || oc.NetInfo.TopologyType() == types.LocalnetTopology) + util.AllowsPersistentIPs(oc.NetInfo) } func (oc *BaseSecondaryNetworkController) getNetworkID() (int, error) { - if oc.networkID == nil || *oc.networkID == util.InvalidNetworkID { - oc.networkID = ptr.To(util.InvalidNetworkID) + if oc.networkID == nil || *oc.networkID == util.InvalidID { + oc.networkID = ptr.To(util.InvalidID) nodes, err := oc.watchFactory.GetNodes() if err != nil { - return util.InvalidNetworkID, err + return util.InvalidID, err } *oc.networkID, err = util.GetNetworkID(nodes, oc.NetInfo) if err != nil { - return util.InvalidNetworkID, err + return util.InvalidID, err } } return *oc.networkID, nil } + +// buildUDNEgressSNAT is used to build the conditional SNAT required on L3 and L2 UDNs to +// steer traffic correctly via mp0 when leaving OVN towards the host +func (bsnc *BaseSecondaryNetworkController) buildUDNEgressSNAT(localPodSubnets []*net.IPNet, outputPort string, + node *kapi.Node) ([]*nbdb.NAT, error) { + if len(localPodSubnets) == 0 { + return nil, nil // nothing to do + } + var snats []*nbdb.NAT + var masqIP *udn.MasqueradeIPs + var err error + networkID, err := bsnc.getNetworkID() + if err != nil { + return nil, fmt.Errorf("failed to get networkID for network %q: %v", bsnc.GetNetworkName(), err) + } + // legacy lookup for mac + dstMac, err := util.ParseNodeManagementPortMACAddresses(node, bsnc.GetNetworkName()) + if err != nil && !util.IsAnnotationNotSetError(err) { + return nil, fmt.Errorf("failed to parse mac address annotation for network %q on node %q, err: %w", + bsnc.GetNetworkName(), node.Name, err) + } + if len(dstMac) == 0 && len(localPodSubnets) > 0 { + // calculate MAC + dstMac = util.IPAddrToHWAddr(util.GetNodeManagementIfAddr(localPodSubnets[0]).IP) + } + + extIDs := map[string]string{ + types.NetworkExternalID: bsnc.GetNetworkName(), + types.TopologyExternalID: bsnc.TopologyType(), + } + for _, localPodSubnet := range localPodSubnets { + if utilnet.IsIPv6CIDR(localPodSubnet) { + masqIP, err = udn.AllocateV6MasqueradeIPs(networkID) + } else { + masqIP, err = udn.AllocateV4MasqueradeIPs(networkID) + } + if err != nil { + return nil, err + } + if masqIP == nil { + return nil, fmt.Errorf("masquerade IP cannot be empty for network %s (%d): %v", bsnc.GetNetworkName(), networkID, err) + } + snats = append(snats, libovsdbops.BuildSNATWithMatch(&masqIP.ManagementPort.IP, localPodSubnet, outputPort, + extIDs, getMasqueradeManagementIPSNATMatch(dstMac.String()))) + } + return snats, nil +} + +func (bsnc *BaseSecondaryNetworkController) ensureDHCP(pod *corev1.Pod, podAnnotation *util.PodAnnotation, lsp *nbdb.LogicalSwitchPort) error { + opts := []kubevirt.DHCPConfigsOpt{} + + ipv4DNSServer, ipv6DNSServer, err := kubevirt.RetrieveDNSServiceClusterIPs(bsnc.watchFactory) + if err != nil { + return err + } + + ipv4Gateway, _ := util.MatchFirstIPFamily(false /*ipv4*/, podAnnotation.Gateways) + if ipv4Gateway != nil { + opts = append(opts, kubevirt.WithIPv4Router(ipv4Gateway.String())) + } + + if bsnc.MTU() > 0 { + opts = append(opts, kubevirt.WithIPv4MTU(bsnc.MTU())) + } + + opts = append(opts, kubevirt.WithIPv4DNSServer(ipv4DNSServer), kubevirt.WithIPv6DNSServer(ipv6DNSServer)) + + return kubevirt.EnsureDHCPOptionsForLSP(bsnc.controllerName, bsnc.nbClient, pod, podAnnotation.IPs, lsp, opts...)
+} + +func getMasqueradeManagementIPSNATMatch(dstMac string) string { + return fmt.Sprintf("eth.dst == %s", dstMac) +} + +func (bsnc *BaseSecondaryNetworkController) requireDHCP(pod *corev1.Pod) bool { + // Configure DHCP only for KubeVirt VMs on a layer2 primary UDN with subnets + return kubevirt.IsPodOwnedByVirtualMachine(pod) && + util.IsNetworkSegmentationSupportEnabled() && + bsnc.IsPrimaryNetwork() && + bsnc.TopologyType() == types.Layer2Topology +} + +func (bsnc *BaseSecondaryNetworkController) setPodLogicalSwitchPortEnabledField( + pod *corev1.Pod, nadName string, ops []ovsdb.Operation, enabled bool) ([]ovsdb.Operation, *nbdb.LogicalSwitchPort, error) { + lsp := &nbdb.LogicalSwitchPort{Name: bsnc.GetLogicalPortName(pod, nadName)} + lsp.Enabled = ptr.To(enabled) + switchName, err := bsnc.getExpectedSwitchName(pod) + if err != nil { + return nil, nil, fmt.Errorf("failed to fetch switch name for pod %s: %w", pod.Name, err) + } + customFields := []libovsdbops.ModelUpdateField{libovsdbops.LogicalSwitchPortEnabled} + ops, err = libovsdbops.UpdateLogicalSwitchPortsOnSwitchWithCustomFieldsOps(bsnc.nbClient, ops, &nbdb.LogicalSwitch{Name: switchName}, customFields, lsp) + if err != nil { + return nil, nil, fmt.Errorf("failed updating logical switch port %+v on switch %s: %w", *lsp, switchName, err) + } + return ops, lsp, nil +} + +func (bsnc *BaseSecondaryNetworkController) disableLiveMigrationSourceLSPOps( + kubevirtLiveMigrationStatus *kubevirt.LiveMigrationStatus, + nadName string, ops []ovsdb.Operation) ([]ovsdb.Operation, error) { + // close the source pod's LSP to ensure traffic goes to the now-ready target pod. + ops, _, err := bsnc.setPodLogicalSwitchPortEnabledField(kubevirtLiveMigrationStatus.SourcePod, nadName, ops, false) + return ops, err +} + +func (bsnc *BaseSecondaryNetworkController) enableSourceLSPFailedLiveMigration(pod *corev1.Pod, nadName string) error { + kubevirtLiveMigrationStatus, err := kubevirt.DiscoverLiveMigrationStatus(bsnc.watchFactory, pod) + if err != nil { + return fmt.Errorf("failed to discover Live-migration status after pod termination: %w", err) + } + if kubevirtLiveMigrationStatus == nil || + pod.Name != kubevirtLiveMigrationStatus.TargetPod.Name || + kubevirtLiveMigrationStatus.State != kubevirt.LiveMigrationFailed { + return nil + } + // make sure the source pod's LSP is enabled if migration failed after DomainReady was set. + ops, sourcePodLsp, err := bsnc.setPodLogicalSwitchPortEnabledField(kubevirtLiveMigrationStatus.SourcePod, nadName, nil, true) + if err != nil { + return fmt.Errorf("failed to set source Pod lsp to enabled after migration failed: %w", err) + } + _, err = libovsdbops.TransactAndCheckAndSetUUIDs(bsnc.nbClient, sourcePodLsp, ops) + if err != nil { + return fmt.Errorf("failed transacting operations %+v: %w", ops, err) + } + + return nil +} diff --git a/go-controller/pkg/ovn/base_network_controller_secondary_test.go b/go-controller/pkg/ovn/base_network_controller_secondary_test.go index 9487aa7769..01f7fe8b11 100644 --- a/go-controller/pkg/ovn/base_network_controller_secondary_test.go +++ b/go-controller/pkg/ovn/base_network_controller_secondary_test.go @@ -1,13 +1,19 @@ package ovn import ( - . "github.com/onsi/ginkgo" + "context" + + . "github.com/onsi/ginkgo/v2" + .
"github.com/onsi/gomega" + kubevirtv1 "kubevirt.io/api/core/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" + libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -17,6 +23,10 @@ var _ = Describe("BaseSecondaryNetworkController", func() { nad = ovntest.GenerateNAD("bluenet", "rednad", "greenamespace", types.Layer3Topology, "100.128.0.0/16", types.NetworkRolePrimary) ) + BeforeEach(func() { + // Restore global default values before each testcase + Expect(config.PrepareTestConfig()).To(Succeed()) + }) It("should return networkID from one of the nodes node", func() { fakeOVN := NewFakeOVN(false) fakeOVN.start(&corev1.Node{ @@ -51,7 +61,185 @@ var _ = Describe("BaseSecondaryNetworkController", func() { networkID, err := controller.bnc.getNetworkID() Expect(err).To(HaveOccurred()) - Expect(networkID).To(Equal(util.InvalidNetworkID)) + Expect(networkID).To(Equal(util.InvalidID)) }) + type dhcpTest struct { + vmName string + ips []string + dns []string + gateways []string + expectedDHCPv4Options *nbdb.DHCPOptions + expectedDHCPv6Options *nbdb.DHCPOptions + } + DescribeTable("with layer2 primary UDN when configuring DHCP", func(t dhcpTest) { + layer2NAD := ovntest.GenerateNAD("bluenet", "rednad", "greenamespace", + types.Layer2Topology, "100.128.0.0/16", types.NetworkRolePrimary) + fakeOVN := NewFakeOVN(true) + lsp := &nbdb.LogicalSwitchPort{ + Name: "vm-port", + UUID: "vm-port-UUID", + } + logicalSwitch := &nbdb.LogicalSwitch{ + UUID: "layer2-switch-UUID", + Name: "layer2-switch", + Ports: []string{lsp.UUID}, + } + + initialDB := libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + logicalSwitch, + lsp, + }, + } + fakeOVN.startWithDBSetup( + initialDB, + &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "worker1", + Annotations: map[string]string{ + "k8s.ovn.org/network-ids": `{"bluenet": "3"}`, + }, + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "kube-system", + Name: "kube-dns", + }, + Spec: corev1.ServiceSpec{ + ClusterIPs: t.dns, + }, + }, + ) + defer fakeOVN.shutdown() + + Expect(fakeOVN.NewSecondaryNetworkController(layer2NAD)).To(Succeed()) + controller, ok := fakeOVN.secondaryControllers["bluenet"] + Expect(ok).To(BeTrue()) + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "dummy", + Labels: map[string]string{ + kubevirtv1.VirtualMachineNameLabel: t.vmName, + }, + }, + } + ips, err := util.ParseIPNets(t.ips) + Expect(err).ToNot(HaveOccurred()) + podAnnotation := &util.PodAnnotation{ + IPs: ips, + } + Expect(controller.bnc.ensureDHCP(pod, podAnnotation, lsp)).To(Succeed()) + expectedDB := []libovsdbtest.TestData{} + + By("asserting the OVN entities provisioned in the NBDB are the expected ones") + expectedLSP := lsp.DeepCopy() + if t.expectedDHCPv4Options != nil { + t.expectedDHCPv4Options.UUID = "vm1-dhcpv4-UUID" + expectedLSP.Dhcpv4Options = &t.expectedDHCPv4Options.UUID + expectedDB = append(expectedDB, t.expectedDHCPv4Options) + } + if t.expectedDHCPv6Options != nil { + t.expectedDHCPv6Options.UUID = "vm1-dhcpv6-UUID" + expectedLSP.Dhcpv6Options = &t.expectedDHCPv6Options.UUID + expectedDB = append(expectedDB, t.expectedDHCPv6Options) + } + 
// Refresh logical switch to have the proper ports UUID + obtainedLogicalSwitches := []*nbdb.LogicalSwitch{} + Expect(fakeOVN.nbClient.List(context.Background(), &obtainedLogicalSwitches)).To(Succeed()) + expectedDB = append(expectedDB, + obtainedLogicalSwitches[0], + expectedLSP, + ) + Expect(fakeOVN.nbClient).To(libovsdbtest.HaveData(expectedDB)) + + }, + Entry("for ipv4 singlestack", dhcpTest{ + vmName: "vm1", + dns: []string{"10.96.0.100"}, + ips: []string{"192.168.100.4/24"}, + expectedDHCPv4Options: &nbdb.DHCPOptions{ + Cidr: "192.168.100.0/24", + ExternalIDs: map[string]string{ + "k8s.ovn.org/cidr": "192.168.100.0/24", + "k8s.ovn.org/id": "bluenet-network-controller:VirtualMachine:foo/vm1:192.168.100.0/24", + "k8s.ovn.org/zone": "local", + "k8s.ovn.org/owner-controller": "bluenet-network-controller", + "k8s.ovn.org/owner-type": "VirtualMachine", + "k8s.ovn.org/name": "foo/vm1", + }, + Options: map[string]string{ + "lease_time": "3500", + "server_mac": "0a:58:a9:fe:01:01", + "hostname": "\"vm1\"", + "mtu": "1300", + "dns_server": "10.96.0.100", + "server_id": "169.254.1.1", + }, + }, + }), + Entry("for ipv6 singlestack", dhcpTest{ + vmName: "vm1", + dns: []string{"2015:100:200::10"}, + ips: []string{"2010:100:200::2/60"}, + expectedDHCPv6Options: &nbdb.DHCPOptions{ + Cidr: "2010:100:200::/60", + ExternalIDs: map[string]string{ + "k8s.ovn.org/name": "foo/vm1", + "k8s.ovn.org/cidr": "2010.100.200../60", + "k8s.ovn.org/id": "bluenet-network-controller:VirtualMachine:foo/vm1:2010.100.200../60", + "k8s.ovn.org/zone": "local", + "k8s.ovn.org/owner-controller": "bluenet-network-controller", + "k8s.ovn.org/owner-type": "VirtualMachine", + }, + Options: map[string]string{ + "server_id": "0a:58:6d:6d:c1:50", + "fqdn": "\"vm1\"", + "dns_server": "2015:100:200::10", + }, + }, + }), + Entry("for dualstack", dhcpTest{ + vmName: "vm1", + dns: []string{"10.96.0.100", "2015:100:200::10"}, + ips: []string{"192.168.100.4/24", "2010:100:200::2/60"}, + expectedDHCPv4Options: &nbdb.DHCPOptions{ + Cidr: "192.168.100.0/24", + ExternalIDs: map[string]string{ + "k8s.ovn.org/cidr": "192.168.100.0/24", + "k8s.ovn.org/id": "bluenet-network-controller:VirtualMachine:foo/vm1:192.168.100.0/24", + "k8s.ovn.org/zone": "local", + "k8s.ovn.org/owner-controller": "bluenet-network-controller", + "k8s.ovn.org/owner-type": "VirtualMachine", + "k8s.ovn.org/name": "foo/vm1", + }, + Options: map[string]string{ + "lease_time": "3500", + "server_mac": "0a:58:a9:fe:01:01", + "hostname": "\"vm1\"", + "mtu": "1300", + "dns_server": "10.96.0.100", + "server_id": "169.254.1.1", + }, + }, + expectedDHCPv6Options: &nbdb.DHCPOptions{ + Cidr: "2010:100:200::/60", + ExternalIDs: map[string]string{ + "k8s.ovn.org/name": "foo/vm1", + "k8s.ovn.org/cidr": "2010.100.200../60", + "k8s.ovn.org/id": "bluenet-network-controller:VirtualMachine:foo/vm1:2010.100.200../60", + "k8s.ovn.org/zone": "local", + "k8s.ovn.org/owner-controller": "bluenet-network-controller", + "k8s.ovn.org/owner-type": "VirtualMachine", + }, + Options: map[string]string{ + "server_id": "0a:58:6d:6d:c1:50", + "fqdn": "\"vm1\"", + "dns_server": "2015:100:200::10", + }, + }, + }), + ) }) diff --git a/go-controller/pkg/ovn/base_secondary_layer2_network_controller.go b/go-controller/pkg/ovn/base_secondary_layer2_network_controller.go index bde5928d01..71d6df0849 100644 --- a/go-controller/pkg/ovn/base_secondary_layer2_network_controller.go +++ b/go-controller/pkg/ovn/base_secondary_layer2_network_controller.go @@ -8,6 +8,7 @@ import ( libovsdbops
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors" corev1 "k8s.io/api/core/v1" @@ -33,8 +34,11 @@ func (oc *BaseSecondaryLayer2NetworkController) stop() { if oc.ipamClaimsHandler != nil { oc.watchFactory.RemoveIPAMClaimsHandler(oc.ipamClaimsHandler) } - if oc.policyHandler != nil { - oc.watchFactory.RemoveMultiNetworkPolicyHandler(oc.policyHandler) + if oc.netPolicyHandler != nil { + oc.watchFactory.RemovePolicyHandler(oc.netPolicyHandler) + } + if oc.multiNetPolicyHandler != nil { + oc.watchFactory.RemoveMultiNetworkPolicyHandler(oc.multiNetPolicyHandler) } if oc.podHandler != nil { oc.watchFactory.RemovePodHandler(oc.podHandler) @@ -100,22 +104,29 @@ func (oc *BaseSecondaryLayer2NetworkController) run() error { return err } - // WatchMultiNetworkPolicy depends on WatchPods and WatchNamespaces - if err := oc.WatchMultiNetworkPolicy(); err != nil { - return err + if util.IsMultiNetworkPoliciesSupportEnabled() { + // WatchMultiNetworkPolicy depends on WatchPods and WatchNamespaces + if err := oc.WatchMultiNetworkPolicy(); err != nil { + return err + } + } + + if oc.IsPrimaryNetwork() { + // WatchNetworkPolicy depends on WatchPods and WatchNamespaces + if err := oc.WatchNetworkPolicy(); err != nil { + return err + } } return nil } func (oc *BaseSecondaryLayer2NetworkController) initializeLogicalSwitch(switchName string, clusterSubnets []config.CIDRNetworkEntry, - excludeSubnets []*net.IPNet) (*nbdb.LogicalSwitch, error) { + excludeSubnets []*net.IPNet, clusterLoadBalancerGroupUUID, switchLoadBalancerGroupUUID string) (*nbdb.LogicalSwitch, error) { logicalSwitch := nbdb.LogicalSwitch{ Name: switchName, - ExternalIDs: map[string]string{}, + ExternalIDs: util.GenerateExternalIDsForSwitchOrRouter(oc.NetInfo), } - logicalSwitch.ExternalIDs[types.NetworkExternalID] = oc.GetNetworkName() - logicalSwitch.ExternalIDs[types.TopologyExternalID] = oc.TopologyType() hostSubnets := make([]*net.IPNet, 0, len(clusterSubnets)) for _, clusterSubnet := range clusterSubnets { @@ -135,6 +146,10 @@ func (oc *BaseSecondaryLayer2NetworkController) initializeLogicalSwitch(switchNa } } + if clusterLoadBalancerGroupUUID != "" && switchLoadBalancerGroupUUID != "" { + logicalSwitch.LoadBalancerGroup = []string{clusterLoadBalancerGroupUUID, switchLoadBalancerGroupUUID} + } + err := libovsdbops.CreateOrUpdateLogicalSwitch(oc.nbClient, &logicalSwitch) if err != nil { return nil, fmt.Errorf("failed to create logical switch %+v: %v", logicalSwitch, err) diff --git a/go-controller/pkg/ovn/baseline_admin_network_policy_test.go b/go-controller/pkg/ovn/baseline_admin_network_policy_test.go index 1eab1bbbdc..7a570475dc 100644 --- a/go-controller/pkg/ovn/baseline_admin_network_policy_test.go +++ b/go-controller/pkg/ovn/baseline_admin_network_policy_test.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" diff --git a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy.go b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy.go index 2e5d821399..1520b4561e 100644 --- 
a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy.go +++ b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy.go @@ -33,7 +33,7 @@ func (c *Controller) processNextANPWorkItem(wg *sync.WaitGroup) bool { } defer c.anpQueue.Done(anpKey) - err := c.syncAdminNetworkPolicy(anpKey.(string)) + err := c.syncAdminNetworkPolicy(anpKey) if err == nil { c.anpQueue.Forget(anpKey) return true @@ -571,7 +571,7 @@ func (c *Controller) createNewANP(desiredANPState *adminNetworkPolicyState, desi return fmt.Errorf("failed to create address-sets, %v", err) } ops = append(ops, addrSetOps...) - ops, err = libovsdbops.CreateOrUpdateACLsOps(c.nbClient, ops, desiredACLs...) + ops, err = libovsdbops.CreateOrUpdateACLsOps(c.nbClient, ops, c.GetSamplingConfig(), desiredACLs...) if err != nil { return fmt.Errorf("failed to create ACL ops: %v", err) } @@ -672,7 +672,7 @@ func (c *Controller) updateExistingANP(currentANPState, desiredANPState *adminNe if fullPeerRecompute || atLeastOneRuleUpdated || hasPriorityChanged || hasACLLoggingParamsChanged { klog.V(3).Infof("ANP %s with priority %d was updated", desiredANPState.name, desiredANPState.anpPriority) // now update the acls to the desired ones - ops, err = libovsdbops.CreateOrUpdateACLsOps(c.nbClient, ops, desiredACLs...) + ops, err = libovsdbops.CreateOrUpdateACLsOps(c.nbClient, ops, c.GetSamplingConfig(), desiredACLs...) if err != nil { return fmt.Errorf("failed to create new ACL ops for anp %s: %v", desiredANPState.name, err) } diff --git a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_controller.go b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_controller.go index 1ee4e47620..ea4299d98c 100644 --- a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_controller.go +++ b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_controller.go @@ -8,6 +8,8 @@ import ( libovsdbclient "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" v1 "k8s.io/api/core/v1" @@ -76,8 +78,8 @@ type Controller struct { banpCache *adminNetworkPolicyState // queues for the CRDs where incoming work is placed to de-dup - anpQueue workqueue.RateLimitingInterface - banpQueue workqueue.RateLimitingInterface + anpQueue workqueue.TypedRateLimitingInterface[string] + banpQueue workqueue.TypedRateLimitingInterface[string] // cached access to anp and banp objects anpLister anplister.AdminNetworkPolicyLister banpLister anplister.BaselineAdminNetworkPolicyLister @@ -86,15 +88,17 @@ type Controller struct { // namespace queue, cache, lister anpNamespaceLister corev1listers.NamespaceLister anpNamespaceSynced cache.InformerSynced - anpNamespaceQueue workqueue.RateLimitingInterface + anpNamespaceQueue workqueue.TypedRateLimitingInterface[string] // pod queue, cache, lister anpPodLister corev1listers.PodLister anpPodSynced cache.InformerSynced - anpPodQueue workqueue.RateLimitingInterface + anpPodQueue workqueue.TypedRateLimitingInterface[string] // node queue, cache, lister anpNodeLister corev1listers.NodeLister anpNodeSynced cache.InformerSynced - anpNodeQueue workqueue.RateLimitingInterface + anpNodeQueue 
workqueue.TypedRateLimitingInterface[string] + + observManager *observability.Manager } // NewController returns a new *Controller. @@ -110,7 +114,8 @@ func NewController( addressSetFactory addressset.AddressSetFactory, isPodScheduledinLocalZone func(*v1.Pod) bool, zone string, - recorder record.EventRecorder) (*Controller, error) { + recorder record.EventRecorder, + observManager *observability.Manager) (*Controller, error) { c := &Controller{ controllerName: controllerName, @@ -122,15 +127,16 @@ func NewController( anpCache: make(map[string]*adminNetworkPolicyState), anpPriorityMap: make(map[int32]string), banpCache: &adminNetworkPolicyState{}, // safe to initialise pointer to empty struct than nil + observManager: observManager, } klog.V(5).Info("Setting up event handlers for Admin Network Policy") // setup anp informers, listers, queue c.anpLister = anpInformer.Lister() c.anpCacheSynced = anpInformer.Informer().HasSynced - c.anpQueue = workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(1*time.Second, 5*time.Second, 5), - "adminNetworkPolicy", + c.anpQueue = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "adminNetworkPolicy"}, ) _, err := anpInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ AddFunc: c.onANPAdd, @@ -146,9 +152,9 @@ func NewController( // setup banp informers, listers, queue c.banpLister = banpInformer.Lister() c.banpCacheSynced = banpInformer.Informer().HasSynced - c.banpQueue = workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(1*time.Second, 5*time.Second, 5), - "baselineAdminNetworkPolicy", + c.banpQueue = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "baselineAdminNetworkPolicy"}, ) _, err = banpInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ AddFunc: c.onBANPAdd, @@ -162,9 +168,9 @@ func NewController( klog.V(5).Info("Setting up event handlers for Namespaces in Admin Network Policy controller") c.anpNamespaceLister = namespaceInformer.Lister() c.anpNamespaceSynced = namespaceInformer.Informer().HasSynced - c.anpNamespaceQueue = workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(1*time.Second, 5*time.Second, 5), - "anpNamespaces", + c.anpNamespaceQueue = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "anpNamespaces"}, ) _, err = namespaceInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ AddFunc: c.onANPNamespaceAdd, @@ -178,9 +184,9 @@ func NewController( klog.V(5).Info("Setting up event handlers for Pods in Admin Network Policy controller") c.anpPodLister = podInformer.Lister() c.anpPodSynced = podInformer.Informer().HasSynced - c.anpPodQueue = workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(1*time.Second, 5*time.Second, 5), - "anpPods", + c.anpPodQueue = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "anpPods"}, ) _, err = 
podInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ AddFunc: c.onANPPodAdd, @@ -194,9 +200,9 @@ func NewController( klog.V(5).Info("Setting up event handlers for Nodes in Admin Network Policy controller") c.anpNodeLister = nodeInformer.Lister() c.anpNodeSynced = podInformer.Informer().HasSynced - c.anpNodeQueue = workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(1*time.Second, 5*time.Second, 5), - "anpNodes", + c.anpNodeQueue = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "anpNodes"}, ) _, err = nodeInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ AddFunc: c.onANPNodeAdd, @@ -588,3 +594,10 @@ func (c *Controller) onANPNodeDelete(obj interface{}) { klog.V(5).Infof("Deleting Node Admin Network Policy %s", key) c.anpNodeQueue.Add(key) } + +func (c *Controller) GetSamplingConfig() *libovsdbops.SamplingConfig { + if c.observManager != nil { + return c.observManager.SamplingConfig() + } + return nil +} diff --git a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_namespace.go b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_namespace.go index d5652d4fb6..9c3c55b234 100644 --- a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_namespace.go +++ b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_namespace.go @@ -23,7 +23,7 @@ func (c *Controller) processNextANPNamespaceWorkItem(wg *sync.WaitGroup) bool { } defer c.anpNamespaceQueue.Done(anpNSKey) - err := c.syncAdminNetworkPolicyNamespace(anpNSKey.(string)) + err := c.syncAdminNetworkPolicyNamespace(anpNSKey) if err == nil { c.anpNamespaceQueue.Forget(anpNSKey) return true @@ -102,7 +102,7 @@ func (c *Controller) syncAdminNetworkPolicyNamespace(key string) error { // clearNamespaceFor(B)ANP will handle the logic for figuring out if the provided namespace name // used to match the given anpCache.name policy. If so, it will requeue the anpCache.name key back // into the main (b)anpQueue cache for reconciling the db objects. If not, function is a no-op. -func (c *Controller) clearNamespaceForANP(name string, anpCache *adminNetworkPolicyState, queue workqueue.RateLimitingInterface) { +func (c *Controller) clearNamespaceForANP(name string, anpCache *adminNetworkPolicyState, queue workqueue.TypedRateLimitingInterface[string]) { // (i) if this namespace used to match this ANP's .Spec.Subject requeue it and return // (ii) if (i) is false, check if it used to match any of the .Spec.Ingress.Peers requeue it and return // (iii) if (i) & (ii) are false, check if it used to match any of the .Spec.Egress.Peers requeue it and return @@ -135,7 +135,7 @@ func (c *Controller) clearNamespaceForANP(name string, anpCache *adminNetworkPol // used to match the given anpCache.name policy or if it started matching the given anpCache.name. // If so, it will requeue the anpCache.name key back into the main (b)anpQueue cache for reconciling // the db objects. If not, function is a no-op. 
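For context on the typed-workqueue migration running through these controller files: with `TypedRateLimitingInterface[string]`, `Get` returns a `string` directly, which is why the `key.(string)` assertions are dropped in every `processNext*WorkItem` above. A minimal, self-contained sketch of the new construction and consumption pattern (queue name and key are illustrative, not taken from the patch):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Typed, string-keyed rate-limiting queue, mirroring the fast/slow limiter
	// parameters used above (1s fast delay, 5s slow delay, 5 fast attempts).
	queue := workqueue.NewTypedRateLimitingQueueWithConfig(
		workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5),
		workqueue.TypedRateLimitingQueueConfig[string]{Name: "example"},
	)
	queue.Add("default/my-policy")

	key, shutdown := queue.Get() // key is already a string: no type assertion
	if shutdown {
		return
	}
	defer queue.Done(key)
	fmt.Println("processing", key)
	queue.Forget(key)
}
```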
-func (c *Controller) setNamespaceForANP(namespace *v1.Namespace, anpCache *adminNetworkPolicyState, queue workqueue.RateLimitingInterface) { +func (c *Controller) setNamespaceForANP(namespace *v1.Namespace, anpCache *adminNetworkPolicyState, queue workqueue.TypedRateLimitingInterface[string]) { // (i) if this namespace used to match this ANP's .Spec.Subject requeue it and return OR // (ii) if this namespace started to match this ANP's .Spec.Subject requeue it and return OR // (iii) if above conditions are false, check if it used to match any of the .Spec.Ingress.Peers requeue it and return OR diff --git a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_node.go b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_node.go index c951486719..9fe234a07e 100644 --- a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_node.go +++ b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_node.go @@ -22,7 +22,7 @@ func (c *Controller) processNextANPNodeWorkItem(wg *sync.WaitGroup) bool { return false } defer c.anpNodeQueue.Done(anpNodeKey) - err := c.syncAdminNetworkPolicyNode(anpNodeKey.(string)) + err := c.syncAdminNetworkPolicyNode(anpNodeKey) if err == nil { c.anpNodeQueue.Forget(anpNodeKey) return true @@ -104,7 +104,7 @@ func (c *Controller) syncAdminNetworkPolicyNode(key string) error { // clearNodeFor(B)ANP will handle the logic for figuring out if the provided node name // used to match the given anpCache.name policy. If so, it will requeue the anpCache.name key back // into the main (b)anpQueue cache for reconciling the db objects. If not, function is a no-op. -func (c *Controller) clearNodeForANP(name string, anpCache *adminNetworkPolicyState, queue workqueue.RateLimitingInterface) { +func (c *Controller) clearNodeForANP(name string, anpCache *adminNetworkPolicyState, queue workqueue.TypedRateLimitingInterface[string]) { // (i) check if it used to match any of the .Spec.Egress.Peers requeue it and return for _, rule := range anpCache.egressRules { for _, peer := range rule.peers { @@ -121,7 +121,7 @@ func (c *Controller) clearNodeForANP(name string, anpCache *adminNetworkPolicySt // used to match the given anpCache.name policy or if it started matching the given anpCache.name. // If so, it will requeue the anpCache.name key back into the main (b)anpQueue cache for reconciling // the db objects. If not, function is a no-op.
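The sync loops above also share a retry discipline around the typed queue: rate-limited requeue until a retry budget is exhausted, then drop. A hedged sketch of that shape (`maxRetries` is an assumed value for illustration; `Done` is handled by the caller via `defer`, as in the loops above):

```go
package anpqueue

import "k8s.io/client-go/util/workqueue"

const maxRetries = 15 // assumed retry budget, for illustration only

// handleWithRetry mimics the processNext*WorkItem error handling: requeue
// with backoff while under budget, otherwise drop the key and reset its
// rate-limiting history via Forget.
func handleWithRetry(queue workqueue.TypedRateLimitingInterface[string], key string, sync func(string) error) {
	if err := sync(key); err != nil && queue.NumRequeues(key) < maxRetries {
		queue.AddRateLimited(key) // back off per the fast/slow limiter
		return
	}
	queue.Forget(key)
}
```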
-func (c *Controller) setNodeForANP(node *v1.Node, anpCache *adminNetworkPolicyState, queue workqueue.RateLimitingInterface) { +func (c *Controller) setNodeForANP(node *v1.Node, anpCache *adminNetworkPolicyState, queue workqueue.TypedRateLimitingInterface[string]) { // (i) if above conditions are false, check if it used to match any of the .Spec.Egress.Peers requeue it and return OR // (ii) check if it started to match any of the .Spec.Egress.Peers requeue it and return // The goal is to check if this node matches the ANP in at least one of the above ways, we immediately add key diff --git a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_pod.go b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_pod.go index 37c7b90eea..0a6d097ebe 100644 --- a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_pod.go +++ b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_pod.go @@ -23,7 +23,7 @@ func (c *Controller) processNextANPPodWorkItem(wg *sync.WaitGroup) bool { return false } defer c.anpPodQueue.Done(anpPodKey) - err := c.syncAdminNetworkPolicyPod(anpPodKey.(string)) + err := c.syncAdminNetworkPolicyPod(anpPodKey) if err == nil { c.anpPodQueue.Forget(anpPodKey) return true @@ -122,7 +122,7 @@ func (c *Controller) syncAdminNetworkPolicyPod(key string) error { // clearPodForANP will handle the logic for figuring out if the provided pod name // used to match the given anpCache.name policy. If so, it will requeue the anpCache.name key back // into the main (b)anpQueue cache for reconciling the db objects. If not, function is a no-op. -func (c *Controller) clearPodForANP(namespace, name string, anpCache *adminNetworkPolicyState, queue workqueue.RateLimitingInterface) { +func (c *Controller) clearPodForANP(namespace, name string, anpCache *adminNetworkPolicyState, queue workqueue.TypedRateLimitingInterface[string]) { // (i) if this pod used to match this ANP's .Spec.Subject requeue it and return // (ii) if (i) is false, check if it used to match any of the .Spec.Ingress.Peers requeue it and return // (iii) if (i) & (ii) are false, check if it used to match any of the .Spec.Egress.Peers requeue it and return @@ -162,7 +162,7 @@ func (c *Controller) clearPodForANP(namespace, name string, anpCache *adminNetwo // used to match the given anpCache.name policy or if it started matching the given anpCache.name. // If so, it will requeue the anpCache.name key back into the main (b)anpQueue cache for reconciling // the db objects. If not, function is a no-op.
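`setPodForANP` in the next hunk takes `namespaceLabels labels.Labels` because subject matching combines namespace and pod selectors. A minimal sketch of that style of check using only apimachinery primitives (the controller's real subject semantics are richer than this):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// A subject pod-selector as it might appear in an ANP spec (illustrative).
	subject := &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}}

	selector, err := metav1.LabelSelectorAsSelector(subject)
	if err != nil {
		panic(err)
	}

	podLabels := labels.Set{"app": "web", "tier": "frontend"}
	fmt.Println("pod matches subject:", selector.Matches(podLabels)) // true
}
```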
-func (c *Controller) setPodForANP(pod *v1.Pod, anpCache *adminNetworkPolicyState, namespaceLabels labels.Labels, queue workqueue.RateLimitingInterface) { +func (c *Controller) setPodForANP(pod *v1.Pod, anpCache *adminNetworkPolicyState, namespaceLabels labels.Labels, queue workqueue.TypedRateLimitingInterface[string]) { // (i) if this pod used to match this ANP's .Spec.Subject requeue it and return OR // (ii) if this pod started to match this ANP's .Spec.Subject requeue it and return OR // (iii) if above conditions are false, check if it used to match any of the .Spec.Ingress.Peers requeue it and return OR diff --git a/go-controller/pkg/ovn/controller/admin_network_policy/baseline_admin_network_policy.go b/go-controller/pkg/ovn/controller/admin_network_policy/baseline_admin_network_policy.go index c9525d54b3..7337084d42 100644 --- a/go-controller/pkg/ovn/controller/admin_network_policy/baseline_admin_network_policy.go +++ b/go-controller/pkg/ovn/controller/admin_network_policy/baseline_admin_network_policy.go @@ -23,7 +23,7 @@ func (c *Controller) processNextBANPWorkItem(wg *sync.WaitGroup) bool { } defer c.banpQueue.Done(banpKey) - err := c.syncBaselineAdminNetworkPolicy(banpKey.(string)) + err := c.syncBaselineAdminNetworkPolicy(banpKey) if err == nil { c.banpQueue.Forget(banpKey) return true diff --git a/go-controller/pkg/ovn/controller/admin_network_policy/status_test.go b/go-controller/pkg/ovn/controller/admin_network_policy/status_test.go index 213a758451..2b26f1c93f 100644 --- a/go-controller/pkg/ovn/controller/admin_network_policy/status_test.go +++ b/go-controller/pkg/ovn/controller/admin_network_policy/status_test.go @@ -4,7 +4,7 @@ import ( "context" "testing" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" libovsdbclient "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" @@ -127,6 +127,7 @@ func newANPControllerWithDBSetup(dbSetup libovsdbtest.TestSetup, initANPs anpapi nil, // we don't care about pods in this test "targaryen", recorder, + nil, ) gomega.Expect(err).ToNot(gomega.HaveOccurred()) diff --git a/go-controller/pkg/ovn/controller/apbroute/apbroute_suite_test.go b/go-controller/pkg/ovn/controller/apbroute/apbroute_suite_test.go index 1305fd2bad..8f9c4a6171 100644 --- a/go-controller/pkg/ovn/controller/apbroute/apbroute_suite_test.go +++ b/go-controller/pkg/ovn/controller/apbroute/apbroute_suite_test.go @@ -3,7 +3,7 @@ package apbroute import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" .
"github.com/onsi/gomega" ) diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller.go b/go-controller/pkg/ovn/controller/apbroute/external_controller.go index bcf2c51280..ad8557c82f 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller.go @@ -211,15 +211,15 @@ type externalPolicyManager struct { // route policies routeLister adminpolicybasedroutelisters.AdminPolicyBasedExternalRouteLister routeInformer cache.SharedIndexInformer - routeQueue workqueue.RateLimitingInterface + routeQueue workqueue.TypedRateLimitingInterface[string] // Pods podLister corev1listers.PodLister podInformer cache.SharedIndexInformer - podQueue workqueue.RateLimitingInterface + podQueue workqueue.TypedRateLimitingInterface[*v1.Pod] // Namespaces - namespaceQueue workqueue.RateLimitingInterface + namespaceQueue workqueue.TypedRateLimitingInterface[*v1.Namespace] namespaceLister corev1listers.NamespaceLister namespaceInformer cache.SharedIndexInformer @@ -249,21 +249,21 @@ func newExternalPolicyManager( routeLister: apbRouteInformer.Lister(), routeInformer: apbRouteInformer.Informer(), - routeQueue: workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(time.Second, 5*time.Second, 5), - "adminpolicybasedexternalroutes", + routeQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[string](time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "adminpolicybasedexternalroutes"}, ), podLister: podInformer.Lister(), podInformer: podInformer.Informer(), - podQueue: workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(time.Second, 5*time.Second, 5), - "apbexternalroutepods", + podQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[*v1.Pod](time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[*v1.Pod]{Name: "apbexternalroutepods"}, ), namespaceLister: namespaceInformer.Lister(), namespaceInformer: namespaceInformer.Informer(), - namespaceQueue: workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(time.Second, 5*time.Second, 5), - "apbexternalroutenamespaces", + namespaceQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[*v1.Namespace](time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[*v1.Namespace]{Name: "apbexternalroutenamespaces"}, ), updatePolicyStatusFunc: updatePolicyStatusFunc, } @@ -356,13 +356,13 @@ func (m *externalPolicyManager) processNextPolicyWorkItem(wg *sync.WaitGroup) bo defer m.routeQueue.Done(key) klog.V(4).Infof("Processing policy %s", key) - gwIPs, err := m.syncRoutePolicy(key.(string)) + gwIPs, err := m.syncRoutePolicy(key) if err != nil { klog.Errorf("Failed to sync APB policy %s: %v", key, err) } if m.updatePolicyStatusFunc != nil { - statusErr := m.updatePolicyStatusFunc(key.(string), gwIPs, err) + statusErr := m.updatePolicyStatusFunc(key, gwIPs, err) if statusErr != nil { klog.Warningf("Failed to update AdminPolicyBasedExternalRoutes %s status: %v", key, statusErr) } @@ -518,14 +518,14 @@ func (m *externalPolicyManager) processNextNamespaceWorkItem(wg *sync.WaitGroup) defer m.namespaceQueue.Done(obj) - err := m.syncNamespace(obj.(*v1.Namespace), m.routeQueue) + err := m.syncNamespace(obj, m.routeQueue) if err != nil { if m.namespaceQueue.NumRequeues(obj) < maxRetries { - klog.V(4).Infof("Error found while processing 
namespace %s:%v", obj.(*v1.Namespace), err) + klog.V(4).Infof("Error found while processing namespace %s:%v", obj, err) m.namespaceQueue.AddRateLimited(obj) return true } - klog.Warningf("Dropping namespace %q out of the queue: %v", obj.(*v1.Namespace).Name, err) + klog.Warningf("Dropping namespace %q out of the queue: %v", obj.Name, err) utilruntime.HandleError(err) } m.namespaceQueue.Forget(obj) @@ -570,7 +570,7 @@ func (m *externalPolicyManager) onPodUpdate(oldObj, newObj interface{}) { reflect.DeepEqual(o.Annotations[nettypes.NetworkStatusAnnot], n.Annotations[nettypes.NetworkStatusAnnot]) { return } - m.podQueue.Add(newObj) + m.podQueue.Add(n) } func (m *externalPolicyManager) onPodDelete(obj interface{}) { @@ -609,15 +609,14 @@ func (m *externalPolicyManager) processNextPodWorkItem(wg *sync.WaitGroup) bool defer m.podQueue.Done(obj) - p := obj.(*v1.Pod) - err := m.syncPod(p, m.routeQueue) + err := m.syncPod(obj, m.routeQueue) if err != nil { if m.podQueue.NumRequeues(obj) < maxRetries { - klog.V(4).Infof("Error found while processing pod %s/%s:%v", p.Namespace, p.Name, err) + klog.V(4).Infof("Error found while processing pod %s/%s:%v", obj.Namespace, obj.Name, err) m.podQueue.AddRateLimited(obj) return true } - klog.Warningf("Dropping pod %s/%s out of the queue: %s", p.Namespace, p.Name, err) + klog.Warningf("Dropping pod %s/%s out of the queue: %s", obj.Namespace, obj.Name, err) utilruntime.HandleError(err) } diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace.go b/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace.go index 9f8abe48a6..aad9060757 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace.go @@ -7,7 +7,7 @@ import ( "k8s.io/klog/v2" ) -func (m *externalPolicyManager) syncNamespace(namespace *v1.Namespace, routeQueue workqueue.RateLimitingInterface) error { +func (m *externalPolicyManager) syncNamespace(namespace *v1.Namespace, routeQueue workqueue.TypedRateLimitingInterface[string]) error { policyKeys, err := m.getPoliciesForNamespaceChange(namespace) if err != nil { return err diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace_test.go b/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace_test.go index 4fc47c82a4..9952449b9c 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace_test.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace_test.go @@ -6,7 +6,7 @@ import ( "time" nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller_pod.go b/go-controller/pkg/ovn/controller/apbroute/external_controller_pod.go index 3556485d81..c7871a340d 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller_pod.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller_pod.go @@ -13,7 +13,7 @@ import ( utilnet "k8s.io/utils/net" ) -func (m *externalPolicyManager) syncPod(pod *v1.Pod, routeQueue workqueue.RateLimitingInterface) error { +func (m *externalPolicyManager) syncPod(pod *v1.Pod, routeQueue workqueue.TypedRateLimitingInterface[string]) error { policyKeys, err := m.getPoliciesForPodChange(pod) if err != nil { return err diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller_pod_test.go b/go-controller/pkg/ovn/controller/apbroute/external_controller_pod_test.go index ec52216028..e068adefc2 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller_pod_test.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller_pod_test.go @@ -5,7 +5,7 @@ import ( "strconv" "time" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go b/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go index f5e9f110df..d0c28984f9 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go @@ -7,7 +7,7 @@ import ( "sync" nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" libovsdbclient "github.com/ovn-org/libovsdb/client" diff --git a/go-controller/pkg/ovn/controller/apbroute/gateway_info/gateway_info_suite_test.go b/go-controller/pkg/ovn/controller/apbroute/gateway_info/gateway_info_suite_test.go index 524eb94ea3..0fe3033ebf 100644 --- a/go-controller/pkg/ovn/controller/apbroute/gateway_info/gateway_info_suite_test.go +++ b/go-controller/pkg/ovn/controller/apbroute/gateway_info/gateway_info_suite_test.go @@ -3,7 +3,7 @@ package gateway_info import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/ovn/controller/apbroute/gateway_info/gateway_info_test.go b/go-controller/pkg/ovn/controller/apbroute/gateway_info/gateway_info_test.go index 5d45be21c6..e52ea38b51 100644 --- a/go-controller/pkg/ovn/controller/apbroute/gateway_info/gateway_info_test.go +++ b/go-controller/pkg/ovn/controller/apbroute/gateway_info/gateway_info_test.go @@ -1,7 +1,7 @@ package gateway_info import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/util/sets" ) diff --git a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone.go b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone.go index 1a4fc82b58..316e9df283 100644 --- a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone.go +++ b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone.go @@ -44,11 +44,11 @@ const ( ) type InitClusterEgressPoliciesFunc func(client libovsdbclient.Client, addressSetFactory addressset.AddressSetFactory, - controllerName, clusterRouter string) error + ni util.NetInfo, clusterSubnets []*net.IPNet, controllerName string) error type EnsureNoRerouteNodePoliciesFunc func(client libovsdbclient.Client, addressSetFactory addressset.AddressSetFactory, - controllerName, clusterRouter string, nodeLister corelisters.NodeLister) error + networkName, controllerName, clusterRouter string, nodeLister corelisters.NodeLister, v4, v6 bool) error type DeleteLegacyDefaultNoRerouteNodePoliciesFunc func(nbClient libovsdbclient.Client, clusterRouter, nodeName string) error -type CreateDefaultRouteToExternalFunc func(nbClient libovsdbclient.Client, clusterRouter, gwRouterName string) error +type CreateDefaultRouteToExternalFunc func(nbClient libovsdbclient.Client, clusterRouter, gwRouterName string, clusterSubnets []config.CIDRNetworkEntry) error type Controller struct { // network information @@ -71,7 +71,7 @@ type Controller struct { egressServiceLister egressservicelisters.EgressServiceLister egressServiceSynced cache.InformerSynced - egressServiceQueue workqueue.RateLimitingInterface + egressServiceQueue workqueue.TypedRateLimitingInterface[string] serviceLister corelisters.ServiceLister servicesSynced cache.InformerSynced @@ -81,7 +81,7 @@ type Controller struct { nodeLister corelisters.NodeLister nodesSynced cache.InformerSynced - nodesQueue workqueue.RateLimitingInterface + nodesQueue workqueue.TypedRateLimitingInterface[string] // An address set factory that creates address sets addressSetFactory addressset.AddressSetFactory @@ -150,9 +150,9 @@ func NewController( c.egressServiceLister = esInformer.Lister() c.egressServiceSynced = esInformer.Informer().HasSynced - c.egressServiceQueue = workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(1*time.Second, 5*time.Second, 5), - "egressservices", + c.egressServiceQueue = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "egressservices"}, ) _, err := esInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ AddFunc: c.onEgressServiceAdd, @@ -189,9 +189,9 @@ func NewController( c.nodeLister = nodeInformer.Lister() c.nodesSynced = nodeInformer.Informer().HasSynced - c.nodesQueue = workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(1*time.Second, 5*time.Second, 5), - "egressservicenodes", + c.nodesQueue = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "egressservicenodes"}, ) _, err = nodeInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ AddFunc: c.onNodeAdd, @@ -231,8 +231,8 @@ func (c *Controller) Run(wg *sync.WaitGroup, threadiness int) error { if err != nil { klog.Errorf("Failed to repair 
Egress Services entries: %v", err) } - - err = c.initClusterEgressPolicies(c.nbClient, c.addressSetFactory, c.controllerName, c.GetNetworkScopedClusterRouterName()) + subnets := util.GetAllClusterSubnetsFromEntries(c.Subnets()) + err = c.initClusterEgressPolicies(c.nbClient, c.addressSetFactory, &util.DefaultNetInfo{}, subnets, c.controllerName) if err != nil { klog.Errorf("Failed to init Egress Services cluster policies: %v", err) } @@ -624,7 +624,7 @@ func (c *Controller) processNextEgressServiceWorkItem(wg *sync.WaitGroup) bool { defer c.egressServiceQueue.Done(key) - err := c.syncEgressService(key.(string)) + err := c.syncEgressService(key) if err == nil { c.egressServiceQueue.Forget(key) return true diff --git a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_endpointslice.go b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_endpointslice.go index cdd7e3f42d..d15fb1d3ea 100644 --- a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_endpointslice.go +++ b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_endpointslice.go @@ -61,7 +61,7 @@ func (c *Controller) onEndpointSliceDelete(obj interface{}) { } func (c *Controller) queueServiceForEndpointSlice(endpointSlice *discovery.EndpointSlice) { - key, err := services.ServiceControllerKey(endpointSlice) + key, err := services.GetServiceKeyFromEndpointSliceForDefaultNetwork(endpointSlice) if err != nil { // Do not log endpointsSlices missing service labels as errors. // Once the service label is eventually added, we will get this event diff --git a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_node.go b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_node.go index bb17226fc2..c12edfea52 100644 --- a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_node.go +++ b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_node.go @@ -81,7 +81,7 @@ func (c *Controller) processNextNodeWorkItem(wg *sync.WaitGroup) bool { defer c.nodesQueue.Done(key) - err := c.syncNode(key.(string)) + err := c.syncNode(key) if err == nil { c.nodesQueue.Forget(key) return true @@ -129,7 +129,9 @@ func (c *Controller) syncNode(key string) error { // We ensure node no re-route policies contemplating possible node IP // address changes regardless of allocated services. 
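As an aside on the `queueServiceForEndpointSlice` change above (the rename to `GetServiceKeyFromEndpointSliceForDefaultNetwork`): an EndpointSlice records its owning Service in the standard `kubernetes.io/service-name` label, so a key lookup reduces to reading that label. A hypothetical reimplementation for illustration, not the actual helper:

```go
package main

import (
	"fmt"

	discovery "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// serviceKeyForSlice sketches the lookup: EndpointSlices name their owning
// Service in the discovery.LabelServiceName ("kubernetes.io/service-name") label.
func serviceKeyForSlice(slice *discovery.EndpointSlice) (string, error) {
	svcName := slice.Labels[discovery.LabelServiceName]
	if svcName == "" {
		return "", fmt.Errorf("endpointslice %s/%s has no %s label",
			slice.Namespace, slice.Name, discovery.LabelServiceName)
	}
	return slice.Namespace + "/" + svcName, nil
}

func main() {
	slice := &discovery.EndpointSlice{ObjectMeta: metav1.ObjectMeta{
		Namespace: "kube-system", Name: "kube-dns-abc12",
		Labels:    map[string]string{discovery.LabelServiceName: "kube-dns"},
	}}
	key, _ := serviceKeyForSlice(slice)
	fmt.Println(key) // kube-system/kube-dns
}
```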
- err = c.ensureNoRerouteNodePolicies(c.nbClient, c.addressSetFactory, c.controllerName, c.GetNetworkScopedClusterRouterName(), c.nodeLister) + network := util.DefaultNetInfo{} + networkName := network.GetNetworkName() + err = c.ensureNoRerouteNodePolicies(c.nbClient, c.addressSetFactory, networkName, c.controllerName, c.GetNetworkScopedClusterRouterName(), c.nodeLister, config.IPv4Mode, config.IPv6Mode) if err != nil { return err } @@ -156,7 +158,7 @@ func (c *Controller) syncNode(key string) error { // At this point the node exists and is ready if config.OVNKubernetesFeature.EnableInterconnect && c.zone != types.OvnDefaultZone && c.isNodeInLocalZone(n) { - if err := c.createDefaultRouteToExternalForIC(c.nbClient, c.GetNetworkScopedClusterRouterName(), c.GetNetworkScopedGWRouterName(nodeName)); err != nil { + if err := c.createDefaultRouteToExternalForIC(c.nbClient, c.GetNetworkScopedClusterRouterName(), c.GetNetworkScopedGWRouterName(nodeName), c.Subnets()); err != nil { return err } } diff --git a/go-controller/pkg/ovn/controller/services/lb_config.go b/go-controller/pkg/ovn/controller/services/lb_config.go index 07f53e512e..7204882be1 100644 --- a/go-controller/pkg/ovn/controller/services/lb_config.go +++ b/go-controller/pkg/ovn/controller/services/lb_config.go @@ -160,7 +160,9 @@ var protos = []v1.Protocol{ // Template LBs will be created for // - services with NodePort set but *without* ExternalTrafficPolicy=Local or // affinity timeout set. -func buildServiceLBConfigs(service *v1.Service, endpointSlices []*discovery.EndpointSlice, nodeInfos []nodeInfo, useLBGroup, useTemplates bool) (perNodeConfigs, templateConfigs, clusterConfigs []lbConfig) { +func buildServiceLBConfigs(service *v1.Service, endpointSlices []*discovery.EndpointSlice, nodeInfos []nodeInfo, + useLBGroup, useTemplates bool, networkName string) (perNodeConfigs, templateConfigs, clusterConfigs []lbConfig) { + needsAffinityTimeout := hasSessionAffinityTimeOut(service) nodes := sets.New[string]() @@ -168,7 +170,7 @@ func buildServiceLBConfigs(service *v1.Service, endpointSlices []*discovery.Endp nodes.Insert(n.name) } // get all the endpoints classified by port and by port,node - portToClusterEndpoints, portToNodeToEndpoints := getEndpointsForService(endpointSlices, service, nodes) + portToClusterEndpoints, portToNodeToEndpoints := getEndpointsForService(endpointSlices, service, nodes, networkName) for _, svcPort := range service.Spec.Ports { svcPortKey := getServicePortKey(svcPort.Protocol, svcPort.Name) clusterEndpoints := portToClusterEndpoints[svcPortKey] @@ -259,12 +261,15 @@ func buildServiceLBConfigs(service *v1.Service, endpointSlices []*discovery.Endp return } +func makeLBNameForNetwork(service *v1.Service, proto v1.Protocol, scope string, netInfo util.NetInfo) string { + return netInfo.GetNetworkScopedLoadBalancerName(makeLBName(service, proto, scope)) +} + // makeLBName creates the load balancer name - used to minimize churn func makeLBName(service *v1.Service, proto v1.Protocol, scope string) string { return fmt.Sprintf("Service_%s/%s_%s_%s", service.Namespace, service.Name, - proto, scope, - ) + proto, scope) } // buildClusterLBs takes a list of lbConfigs and aggregates them @@ -273,14 +278,15 @@ func makeLBName(service *v1.Service, proto v1.Protocol, scope string) string { // It takes a list of (proto:[vips]:port -> [endpoints]) configs and re-aggregates // them to a list of (proto:[vip:port -> [endpoint:port]]) // This load balancer is attached to all node switches. 
In shared-GW mode, it is also on all routers -func buildClusterLBs(service *v1.Service, configs []lbConfig, nodeInfos []nodeInfo, useLBGroup bool) []LB { +// The input netInfo is needed to get the right LB groups and network IDs for the specified network. +func buildClusterLBs(service *v1.Service, configs []lbConfig, nodeInfos []nodeInfo, useLBGroup bool, netInfo util.NetInfo) []LB { var nodeSwitches []string var nodeRouters []string var groups []string if useLBGroup { nodeSwitches = make([]string, 0) nodeRouters = make([]string, 0) - groups = []string{types.ClusterLBGroupName} + groups = []string{netInfo.GetNetworkScopedLoadBalancerGroupName(types.ClusterLBGroupName)} } else { nodeSwitches = make([]string, 0, len(nodeInfos)) nodeRouters = make([]string, 0, len(nodeInfos)) @@ -306,9 +312,9 @@ func buildClusterLBs(service *v1.Service, configs []lbConfig, nodeInfos []nodeIn continue } lb := LB{ - Name: makeLBName(service, proto, "cluster"), + Name: makeLBNameForNetwork(service, proto, "cluster", netInfo), Protocol: string(proto), - ExternalIDs: util.ExternalIDsForObject(service), + ExternalIDs: getExternalIDsForLoadBalancer(service, netInfo), Opts: lbOpts(service), Switches: nodeSwitches, @@ -377,11 +383,13 @@ func buildClusterLBs(service *v1.Service, configs []lbConfig, nodeInfos []nodeIn // Note: // NodePort services with ETP=local or affinity timeout set still need // non-template per-node LBs. +// +// The input netInfo is needed to get the right LB groups and network IDs for the specified network. func buildTemplateLBs(service *v1.Service, configs []lbConfig, nodes []nodeInfo, - nodeIPv4Templates, nodeIPv6Templates *NodeIPsTemplates) []LB { + nodeIPv4Templates, nodeIPv6Templates *NodeIPsTemplates, netInfo util.NetInfo) []LB { cbp := configsByProto(configs) - eids := util.ExternalIDsForObject(service) + eids := getExternalIDsForLoadBalancer(service, netInfo) out := make([]LB, 0, len(configs)) for _, proto := range protos { @@ -403,29 +411,30 @@ func buildTemplateLBs(service *v1.Service, configs []lbConfig, nodes []nodeInfo, makeTemplate( makeLBTargetTemplateName( service, proto, config.inport, - optsV4.AddressFamily, "node_switch_template")) + optsV4.AddressFamily, "node_switch_template", netInfo)) switchV6TemplateTarget := makeTemplate( makeLBTargetTemplateName( service, proto, config.inport, - optsV6.AddressFamily, "node_switch_template")) + optsV6.AddressFamily, "node_switch_template", netInfo)) routerV4TemplateTarget := makeTemplate( makeLBTargetTemplateName( service, proto, config.inport, - optsV4.AddressFamily, "node_router_template")) + optsV4.AddressFamily, "node_router_template", netInfo)) routerV6TemplateTarget := makeTemplate( makeLBTargetTemplateName( service, proto, config.inport, - optsV6.AddressFamily, "node_router_template")) + optsV6.AddressFamily, "node_router_template", netInfo)) allV4TargetIPs := config.clusterEndpoints.V4IPs allV6TargetIPs := config.clusterEndpoints.V6IPs for range config.vips { - klog.V(5).Infof("buildTemplateLBs() service %s/%s adding rules", service.Namespace, service.Name) + klog.V(5).Infof("buildTemplateLBs() service %s/%s adding rules for network=%s", + service.Namespace, service.Name, netInfo.GetNetworkName()) // If all targets have exactly the same IPs on all nodes there's // no need to use a template, just use the same list of explicit @@ -538,22 +547,22 @@ func buildTemplateLBs(service *v1.Service, configs []lbConfig, nodes []nodeInfo, if nodeIPv4Templates.Len() > 0 { if len(switchV4Rules) > 0 { out = append(out, LB{ - Name: 
makeLBName(service, proto, "node_switch_template_IPv4"), + Name: makeLBNameForNetwork(service, proto, "node_switch_template_IPv4", netInfo), Protocol: string(proto), ExternalIDs: eids, Opts: optsV4, - Groups: []string{types.ClusterSwitchLBGroupName}, + Groups: []string{netInfo.GetNetworkScopedLoadBalancerGroupName(types.ClusterSwitchLBGroupName)}, Rules: switchV4Rules, Templates: getTemplatesFromRulesTargets(switchV4Rules), }) } if len(routerV4Rules) > 0 { out = append(out, LB{ - Name: makeLBName(service, proto, "node_router_template_IPv4"), + Name: makeLBNameForNetwork(service, proto, "node_router_template_IPv4", netInfo), Protocol: string(proto), ExternalIDs: eids, Opts: optsV4, - Groups: []string{types.ClusterRouterLBGroupName}, + Groups: []string{netInfo.GetNetworkScopedLoadBalancerGroupName(types.ClusterRouterLBGroupName)}, Rules: routerV4Rules, Templates: getTemplatesFromRulesTargets(routerV4Rules), }) @@ -563,22 +572,22 @@ func buildTemplateLBs(service *v1.Service, configs []lbConfig, nodes []nodeInfo, if nodeIPv6Templates.Len() > 0 { if len(switchV6Rules) > 0 { out = append(out, LB{ - Name: makeLBName(service, proto, "node_switch_template_IPv6"), + Name: makeLBNameForNetwork(service, proto, "node_switch_template_IPv6", netInfo), Protocol: string(proto), ExternalIDs: eids, Opts: optsV6, - Groups: []string{types.ClusterSwitchLBGroupName}, + Groups: []string{netInfo.GetNetworkScopedLoadBalancerGroupName(types.ClusterSwitchLBGroupName)}, Rules: switchV6Rules, Templates: getTemplatesFromRulesTargets(switchV6Rules), }) } if len(routerV6Rules) > 0 { out = append(out, LB{ - Name: makeLBName(service, proto, "node_router_template_IPv6"), + Name: makeLBNameForNetwork(service, proto, "node_router_template_IPv6", netInfo), Protocol: string(proto), ExternalIDs: eids, Opts: optsV6, - Groups: []string{types.ClusterRouterLBGroupName}, + Groups: []string{netInfo.GetNetworkScopedLoadBalancerGroupName(types.ClusterRouterLBGroupName)}, Rules: routerV6Rules, Templates: getTemplatesFromRulesTargets(routerV6Rules), }) @@ -588,9 +597,9 @@ func buildTemplateLBs(service *v1.Service, configs []lbConfig, nodes []nodeInfo, merged := mergeLBs(out) if len(merged) != len(out) { - klog.V(5).Infof("Service %s/%s merged %d LBs to %d", + klog.V(5).Infof("Service %s/%s merged %d LBs to %d for network=%s", service.Namespace, service.Name, - len(out), len(merged)) + len(out), len(merged), netInfo.GetNetworkName()) } return merged @@ -613,9 +622,11 @@ func buildTemplateLBs(service *v1.Service, configs []lbConfig, nodes []nodeInfo, // - SkipSNAT enabled // - NodePort LB on the switch will have masqueradeIP as the vip to handle etp=local for LGW case. // This results in the creation of an additional load balancer on the GatewayRouters and NodeSwitches. -func buildPerNodeLBs(service *v1.Service, configs []lbConfig, nodes []nodeInfo) []LB { +// +// The input netInfo is needed to get the right network IDs for the specified network. 
+func buildPerNodeLBs(service *v1.Service, configs []lbConfig, nodes []nodeInfo, netInfo util.NetInfo) []LB { cbp := configsByProto(configs) - eids := util.ExternalIDsForObject(service) + eids := getExternalIDsForLoadBalancer(service, netInfo) out := make([]LB, 0, len(nodes)*len(configs)) @@ -746,7 +757,7 @@ func buildPerNodeLBs(service *v1.Service, configs []lbConfig, nodes []nodeInfo) // If switch and router rules are identical, coalesce if reflect.DeepEqual(switchRules, routerRules) && len(switchRules) > 0 && node.gatewayRouterName != "" { out = append(out, LB{ - Name: makeLBName(service, proto, "node_router+switch_"+node.name), + Name: makeLBNameForNetwork(service, proto, "node_router+switch_"+node.name, netInfo), Protocol: string(proto), ExternalIDs: eids, Opts: lbOpts(service), @@ -757,7 +768,7 @@ func buildPerNodeLBs(service *v1.Service, configs []lbConfig, nodes []nodeInfo) } else { if len(routerRules) > 0 && node.gatewayRouterName != "" { out = append(out, LB{ - Name: makeLBName(service, proto, "node_router_"+node.name), + Name: makeLBNameForNetwork(service, proto, "node_router_"+node.name, netInfo), Protocol: string(proto), ExternalIDs: eids, Opts: lbOpts(service), @@ -767,7 +778,7 @@ func buildPerNodeLBs(service *v1.Service, configs []lbConfig, nodes []nodeInfo) } if len(noSNATRouterRules) > 0 && node.gatewayRouterName != "" { lb := LB{ - Name: makeLBName(service, proto, "node_local_router_"+node.name), + Name: makeLBNameForNetwork(service, proto, "node_local_router_"+node.name, netInfo), Protocol: string(proto), ExternalIDs: eids, Opts: lbOpts(service), @@ -780,7 +791,7 @@ func buildPerNodeLBs(service *v1.Service, configs []lbConfig, nodes []nodeInfo) if len(switchRules) > 0 { out = append(out, LB{ - Name: makeLBName(service, proto, "node_switch_"+node.name), + Name: makeLBNameForNetwork(service, proto, "node_switch_"+node.name, netInfo), Protocol: string(proto), ExternalIDs: eids, Opts: lbOpts(service), @@ -794,9 +805,9 @@ func buildPerNodeLBs(service *v1.Service, configs []lbConfig, nodes []nodeInfo) merged := mergeLBs(out) if len(merged) != len(out) { - klog.V(5).Infof("Service %s/%s merged %d LBs to %d", + klog.V(5).Infof("Service %s/%s merged %d LBs to %d for network=%s", service.Namespace, service.Name, - len(out), len(merged)) + len(out), len(merged), netInfo.GetNetworkName()) } return merged @@ -938,7 +949,9 @@ func getServicePortKey(protocol v1.Protocol, name string) string { // one classified by port, one classified by port,node. This second map is only filled in // when the service needs local (per-node) endpoints, that is when ETP=local or ITP=local. // The node list helps to keep the resulting map small, since we're only interested in local endpoints. 
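`getEndpointsForService` in the next hunk classifies endpoints per port and per port/node, keeping only nodes it knows about. A reduced sketch of the per-node half of that classification (assumed shape; the real function also tracks ports, address families, and terminating endpoints):

```go
package main

import (
	"fmt"

	discovery "k8s.io/api/discovery/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/utils/ptr"
)

// readyAddressesByNode keeps only ready endpoints placed on known nodes,
// which is what bounds the map size when ETP/ITP=Local needs local endpoints.
func readyAddressesByNode(slices []*discovery.EndpointSlice, nodes sets.Set[string]) map[string][]string {
	byNode := map[string][]string{}
	for _, slice := range slices {
		for _, ep := range slice.Endpoints {
			if ep.Conditions.Ready == nil || !*ep.Conditions.Ready {
				continue
			}
			if ep.NodeName == nil || !nodes.Has(*ep.NodeName) {
				continue
			}
			byNode[*ep.NodeName] = append(byNode[*ep.NodeName], ep.Addresses...)
		}
	}
	return byNode
}

func main() {
	slices := []*discovery.EndpointSlice{{
		Endpoints: []discovery.Endpoint{{
			Addresses:  []string{"10.128.0.2"},
			NodeName:   ptr.To("node-a"),
			Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
		}},
	}}
	fmt.Println(readyAddressesByNode(slices, sets.New("node-a"))) // map[node-a:[10.128.0.2]]
}
```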
-func getEndpointsForService(slices []*discovery.EndpointSlice, service *v1.Service, nodes sets.Set[string]) (map[string]lbEndpoints, map[string]map[string]lbEndpoints) { +func getEndpointsForService(slices []*discovery.EndpointSlice, service *v1.Service, nodes sets.Set[string], + networkName string) (map[string]lbEndpoints, map[string]map[string]lbEndpoints) { + // classify endpoints ports := map[string]int32{} portToEndpoints := map[string][]discovery.Endpoint{} @@ -1015,7 +1028,8 @@ func getEndpointsForService(slices []*discovery.EndpointSlice, service *v1.Servi } } } - klog.V(5).Infof("Cluster endpoints for %s/%s are: %v", service.Namespace, service.Name, portToLBEndpoints) + klog.V(5).Infof("Cluster endpoints for %s/%s for network=%s are: %v", + service.Namespace, service.Name, networkName, portToLBEndpoints) for port, nodeToEndpoints := range portToNodeToEndpoints { for node, endpoints := range nodeToEndpoints { @@ -1037,7 +1051,8 @@ func getEndpointsForService(slices []*discovery.EndpointSlice, service *v1.Servi } if requiresLocalEndpoints { - klog.V(5).Infof("Local endpoints for %s/%s are: %v", service.Namespace, service.Name, portToNodeToLBEndpoints) + klog.V(5).Infof("Local endpoints for %s/%s for network=%s are: %v", + service.Namespace, service.Name, networkName, portToNodeToLBEndpoints) } return portToLBEndpoints, portToNodeToLBEndpoints diff --git a/go-controller/pkg/ovn/controller/services/load_balancer_ocphack_test.go b/go-controller/pkg/ovn/controller/services/lb_config_ocphack_test.go similarity index 88% rename from go-controller/pkg/ovn/controller/services/load_balancer_ocphack_test.go rename to go-controller/pkg/ovn/controller/services/lb_config_ocphack_test.go index be003d0aed..646c548a30 100644 --- a/go-controller/pkg/ovn/controller/services/load_balancer_ocphack_test.go +++ b/go-controller/pkg/ovn/controller/services/lb_config_ocphack_test.go @@ -6,6 +6,7 @@ import ( "testing" globalconfig "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -28,6 +29,8 @@ func Test_buildPerNodeLBs_OCPHackForDNS(t *testing.T) { name := "dns-default" namespace := "openshift-dns" + UDNNetInfo := getSampleUDNNetInfo(namespace) + defaultService := &v1.Service{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, Spec: v1.ServiceSpec{ @@ -57,6 +60,8 @@ func Test_buildPerNodeLBs_OCPHackForDNS(t *testing.T) { "k8s.ovn.org/owner": fmt.Sprintf("%s/%s", namespace, name), } + UDNExternalIDs := loadBalancerExternalIDsForNetwork(namespacedServiceName(namespace, name), UDNNetInfo.GetNetworkName()) + //defaultRouters := []string{"gr-node-a", "gr-node-b"} //defaultSwitches := []string{"switch-node-a", "switch-node-b"} @@ -130,12 +135,27 @@ func Test_buildPerNodeLBs_OCPHackForDNS(t *testing.T) { t.Run(fmt.Sprintf("%d_%s", i, tt.name), func(t *testing.T) { globalconfig.Gateway.Mode = globalconfig.GatewayModeShared - actual := buildPerNodeLBs(tt.service, tt.configs, defaultNodes) + actual := buildPerNodeLBs(tt.service, tt.configs, defaultNodes, &util.DefaultNetInfo{}) + assert.Equal(t, tt.expected, actual, "shared gateway mode not as expected") + + globalconfig.Gateway.Mode = globalconfig.GatewayModeLocal + actual = buildPerNodeLBs(tt.service, tt.configs, defaultNodes, &util.DefaultNetInfo{}) + assert.Equal(t, tt.expected, actual, "local gateway mode not as expected") + + // UDN + for idx := range tt.expected { + 
diff --git a/go-controller/pkg/ovn/controller/services/load_balancer_ocphack_test.go b/go-controller/pkg/ovn/controller/services/lb_config_ocphack_test.go
similarity index 88%
rename from go-controller/pkg/ovn/controller/services/load_balancer_ocphack_test.go
rename to go-controller/pkg/ovn/controller/services/lb_config_ocphack_test.go
index be003d0aed..646c548a30 100644
--- a/go-controller/pkg/ovn/controller/services/load_balancer_ocphack_test.go
+++ b/go-controller/pkg/ovn/controller/services/lb_config_ocphack_test.go
@@ -6,6 +6,7 @@ import (
 	"testing"
 
 	globalconfig "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util"
 	"github.com/stretchr/testify/assert"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -28,6 +29,8 @@ func Test_buildPerNodeLBs_OCPHackForDNS(t *testing.T) {
 	name := "dns-default"
 	namespace := "openshift-dns"
 
+	UDNNetInfo := getSampleUDNNetInfo(namespace)
+
 	defaultService := &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
 		Spec: v1.ServiceSpec{
@@ -57,6 +60,8 @@ func Test_buildPerNodeLBs_OCPHackForDNS(t *testing.T) {
 		"k8s.ovn.org/owner": fmt.Sprintf("%s/%s", namespace, name),
 	}
 
+	UDNExternalIDs := loadBalancerExternalIDsForNetwork(namespacedServiceName(namespace, name), UDNNetInfo.GetNetworkName())
+
 	//defaultRouters := []string{"gr-node-a", "gr-node-b"}
 	//defaultSwitches := []string{"switch-node-a", "switch-node-b"}
 
@@ -130,12 +135,27 @@ func Test_buildPerNodeLBs_OCPHackForDNS(t *testing.T) {
 		t.Run(fmt.Sprintf("%d_%s", i, tt.name), func(t *testing.T) {
 			globalconfig.Gateway.Mode = globalconfig.GatewayModeShared
-			actual := buildPerNodeLBs(tt.service, tt.configs, defaultNodes)
+			actual := buildPerNodeLBs(tt.service, tt.configs, defaultNodes, &util.DefaultNetInfo{})
+			assert.Equal(t, tt.expected, actual, "shared gateway mode not as expected")
+
+			globalconfig.Gateway.Mode = globalconfig.GatewayModeLocal
+			actual = buildPerNodeLBs(tt.service, tt.configs, defaultNodes, &util.DefaultNetInfo{})
+			assert.Equal(t, tt.expected, actual, "local gateway mode not as expected")
+
+			// UDN
+			for idx := range tt.expected {
+				tt.expected[idx].ExternalIDs = UDNExternalIDs
+				tt.expected[idx].Name = UDNNetInfo.GetNetworkScopedLoadBalancerName(tt.expected[idx].Name)
+
+			}
+			globalconfig.Gateway.Mode = globalconfig.GatewayModeShared
+			actual = buildPerNodeLBs(tt.service, tt.configs, defaultNodes, UDNNetInfo)
 			assert.Equal(t, tt.expected, actual, "shared gateway mode not as expected")
 
 			globalconfig.Gateway.Mode = globalconfig.GatewayModeLocal
-			actual = buildPerNodeLBs(tt.service, tt.configs, defaultNodes)
+			actual = buildPerNodeLBs(tt.service, tt.configs, defaultNodes, UDNNetInfo)
 			assert.Equal(t, tt.expected, actual, "local gateway mode not as expected")
+
 		})
 	}
 }
@@ -155,6 +175,8 @@ func Test_buildPerNodeLBs_OCPHackForLocalWithFallback(t *testing.T) {
 	inport := int32(80)
 	outport := int32(8080)
 
+	UDNNetInfo := getSampleUDNNetInfo(namespace)
+
 	defaultService := &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
@@ -208,6 +230,8 @@ func Test_buildPerNodeLBs_OCPHackForLocalWithFallback(t *testing.T) {
 		"k8s.ovn.org/owner": fmt.Sprintf("%s/%s", namespace, name),
 	}
 
+	UDNExternalIDs := loadBalancerExternalIDsForNetwork(namespacedServiceName(namespace, name), UDNNetInfo.GetNetworkName())
+
 	defaultOpts := LBOpts{Reject: true}
 	noSNATOpts := LBOpts{SkipSNAT: true, Reject: true}
 
@@ -424,11 +448,25 @@ func Test_buildPerNodeLBs_OCPHackForLocalWithFallback(t *testing.T) {
 		t.Run(fmt.Sprintf("%d_%s", i, tt.name), func(t *testing.T) {
 			globalconfig.Gateway.Mode = globalconfig.GatewayModeShared
-			actual := buildPerNodeLBs(tt.service, tt.configs, defaultNodes)
+			actual := buildPerNodeLBs(tt.service, tt.configs, defaultNodes, &util.DefaultNetInfo{})
+			assert.Equal(t, tt.expected, actual, "shared gateway mode not as expected")
+
+			globalconfig.Gateway.Mode = globalconfig.GatewayModeLocal
+			actual = buildPerNodeLBs(tt.service, tt.configs, defaultNodes, &util.DefaultNetInfo{})
+			assert.Equal(t, tt.expected, actual, "local gateway mode not as expected")
+
+			// UDN
+			for idx := range tt.expected {
+				tt.expected[idx].ExternalIDs = UDNExternalIDs
+				tt.expected[idx].Name = UDNNetInfo.GetNetworkScopedLoadBalancerName(tt.expected[idx].Name)
+
+			}
+			globalconfig.Gateway.Mode = globalconfig.GatewayModeShared
+			actual = buildPerNodeLBs(tt.service, tt.configs, defaultNodes, UDNNetInfo)
 			assert.Equal(t, tt.expected, actual, "shared gateway mode not as expected")
 
 			globalconfig.Gateway.Mode = globalconfig.GatewayModeLocal
-			actual = buildPerNodeLBs(tt.service, tt.configs, defaultNodes)
+			actual = buildPerNodeLBs(tt.service, tt.configs, defaultNodes, UDNNetInfo)
 			assert.Equal(t, tt.expected, actual, "local gateway mode not as expected")
 		})
 	}
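Both OCP-hack tests reuse their default-network expectations for the UDN pass by rewriting each expected LB's external IDs and name in place, which is safe only because the mutated slice is exactly what the subsequent assertions compare against. A sketch of the same transformation with hypothetical stand-in types (`LB`, `netInfo`, and the name-prefixing behavior are assumptions; the real scoping comes from `util.NetInfo.GetNetworkScopedLoadBalancerName`):

```go
package main

import "fmt"

// LB mirrors only the two fields the tests rewrite; the real struct has more.
type LB struct {
	Name        string
	ExternalIDs map[string]string
}

// netInfo is a stand-in for util.NetInfo; prefixing with the network name is an
// assumption about what GetNetworkScopedLoadBalancerName does.
type netInfo struct{ network string }

func (n netInfo) GetNetworkScopedLoadBalancerName(base string) string {
	return n.network + "_" + base
}

// scopeExpected derives UDN expectations from default-network ones without
// mutating the input: struct values are copied, and the map field is replaced
// wholesale rather than written into.
func scopeExpected(defaults []LB, n netInfo, udnIDs map[string]string) []LB {
	scoped := make([]LB, len(defaults))
	copy(scoped, defaults)
	for i := range scoped {
		scoped[i].ExternalIDs = udnIDs
		scoped[i].Name = n.GetNetworkScopedLoadBalancerName(defaults[i].Name)
	}
	return scoped
}

func main() {
	defaults := []LB{{Name: "Service_openshift-dns/dns-default_TCP_node_router_node-a"}}
	fmt.Println(scopeExpected(defaults, netInfo{network: "tenant-red"}, map[string]string{})[0].Name)
}
```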
diff --git a/go-controller/pkg/ovn/controller/services/lb_config_test.go b/go-controller/pkg/ovn/controller/services/lb_config_test.go
index 5181199362..4e8c124537 100644
--- a/go-controller/pkg/ovn/controller/services/lb_config_test.go
+++ b/go-controller/pkg/ovn/controller/services/lb_config_test.go
@@ -6,10 +6,12 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/assert"
+
 	globalconfig "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
-	kube_test "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing"
+	kubetest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing"
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
-	"github.com/stretchr/testify/assert"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util"
 
 	v1 "k8s.io/api/core/v1"
 	discovery "k8s.io/api/discovery/v1"
@@ -21,8 +23,6 @@ import (
 )
 
 var (
-	nodeA = "node-a"
-	nodeB = "node-b"
 	defaultNodes = []nodeInfo{
 		{
 			name: nodeA,
@@ -40,9 +40,6 @@ var (
 		},
 	}
 
-	tcpv1 = v1.ProtocolTCP
-	udpv1 = v1.ProtocolUDP
-
 	httpPortName  string = "http"
 	httpPortValue int32  = int32(80)
 	httpsPortName string = "https"
@@ -140,8 +137,6 @@ func Test_buildServiceLBConfigs(t *testing.T) {
 	inport1 := int32(81)
 	outport1 := int32(8081)
 	outportstr := intstr.FromInt(int(outport))
-	tcp := v1.ProtocolTCP
-	udp := v1.ProtocolUDP
 
 	// make slices
 	// nil slice = don't use this family
@@ -172,7 +167,7 @@ func Test_buildServiceLBConfigs(t *testing.T) {
 				Name:     &portName,
 			}},
 			AddressType: discovery.AddressTypeIPv4,
-			Endpoints:   kube_test.MakeReadyEndpointList(nodeA, v4ips...),
+			Endpoints:   kubetest.MakeReadyEndpointList(nodeA, v4ips...),
 		})
 	}
 
@@ -200,7 +195,7 @@ func Test_buildServiceLBConfigs(t *testing.T) {
 				Name:     &portName,
 			}},
 			AddressType: discovery.AddressTypeIPv6,
-			Endpoints:   kube_test.MakeReadyEndpointList(nodeA, v6ips...),
+			Endpoints:   kubetest.MakeReadyEndpointList(nodeA, v6ips...),
 		})
 	}
 
@@ -361,7 +356,7 @@ func Test_buildServiceLBConfigs(t *testing.T) {
 					},
 				},
 				AddressType: discovery.AddressTypeIPv4,
-				Endpoints:   kube_test.MakeReadyEndpointList(nodeA, "10.128.0.2", "10.128.1.2"),
+				Endpoints:   kubetest.MakeReadyEndpointList(nodeA, "10.128.0.2", "10.128.1.2"),
 			},
 		},
 		service: &v1.Service{
@@ -433,7 +428,7 @@ func Test_buildServiceLBConfigs(t *testing.T) {
 					},
 				},
 				AddressType: discovery.AddressTypeIPv4,
-				Endpoints:   kube_test.MakeReadyEndpointList(nodeA, "10.128.0.2", "10.128.1.2"),
+				Endpoints:   kubetest.MakeReadyEndpointList(nodeA, "10.128.0.2", "10.128.1.2"),
 			},
 		},
 		service: &v1.Service{
@@ -900,8 +895,8 @@ func Test_buildServiceLBConfigs(t *testing.T) {
 		args: args{
 			slices: makeV4SliceWithEndpoints(
 				v1.ProtocolTCP,
-				kube_test.MakeReadyEndpoint(nodeA, "10.128.0.2"),
-				kube_test.MakeReadyEndpoint(nodeB, "10.128.1.2"),
+				kubetest.MakeReadyEndpoint(nodeA, "10.128.0.2"),
+				kubetest.MakeReadyEndpoint(nodeB, "10.128.1.2"),
 			),
 			service: &v1.Service{
 				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: ns},
@@ -1002,8 +997,8 @@ func Test_buildServiceLBConfigs(t *testing.T) {
 			name: "LB service with NodePort, port, two endpoints, external ips + lb status, ExternalTrafficPolicy=local, one endpoint is ready, the other one is terminating and serving",
 			args: args{
 				slices: makeV4SliceWithEndpoints(v1.ProtocolTCP,
-					kube_test.MakeReadyEndpoint(nodeA, "10.128.0.2"),
-					kube_test.MakeTerminatingServingEndpoint(nodeB, "10.128.1.2")),
+					kubetest.MakeReadyEndpoint(nodeA, "10.128.0.2"),
+					kubetest.MakeTerminatingServingEndpoint(nodeB, "10.128.1.2")),
 				service: &v1.Service{
 					ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: ns},
 					Spec: v1.ServiceSpec{
@@ -1101,8 +1096,8 @@ func Test_buildServiceLBConfigs(t *testing.T) {
 			name: "LB service with NodePort, one port, two endpoints, external ips + lb status, ExternalTrafficPolicy=local, both endpoints terminating: one is serving, the other one is not",
 			args: args{
 				slices: makeV4SliceWithEndpoints(v1.ProtocolTCP,
-					kube_test.MakeTerminatingServingEndpoint(nodeA, "10.128.0.2"),
-					kube_test.MakeTerminatingNonServingEndpoint(nodeB, "10.128.1.2")),
+					kubetest.MakeTerminatingServingEndpoint(nodeA, "10.128.0.2"),
+					kubetest.MakeTerminatingNonServingEndpoint(nodeB, "10.128.1.2")),
 				service: &v1.Service{
 					ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: ns},
 					Spec: v1.ServiceSpec{
@@ -1189,7 +1184,7 @@ func Test_buildServiceLBConfigs(t *testing.T) {
 		t.Run(fmt.Sprintf("%d_%s", i, tt.name), func(t *testing.T) {
 			// shared gateway mode
 			globalconfig.Gateway.Mode = globalconfig.GatewayModeShared
-			perNode, template, clusterWide := buildServiceLBConfigs(tt.args.service, tt.args.slices, defaultNodes, true, true)
+			perNode, template, clusterWide := buildServiceLBConfigs(tt.args.service, tt.args.slices, defaultNodes, true, true, types.DefaultNetworkName)
 			assert.EqualValues(t, tt.resultSharedGatewayNode, perNode, "SGW per-node configs should be equal")
 			assert.EqualValues(t, tt.resultSharedGatewayTemplate, template, "SGW template configs should be equal")
@@ -1197,7 +1192,8 @@
 
 			// local gateway mode
 			globalconfig.Gateway.Mode = globalconfig.GatewayModeLocal
-			perNode, template, clusterWide = buildServiceLBConfigs(tt.args.service, tt.args.slices, defaultNodes, true, true)
+
+			perNode, template, clusterWide = buildServiceLBConfigs(tt.args.service, tt.args.slices, defaultNodes, true, true, types.DefaultNetworkName)
 			if tt.resultsSame {
 				assert.EqualValues(t, tt.resultSharedGatewayNode, perNode, "LGW per-node configs should be equal")
 				assert.EqualValues(t, tt.resultSharedGatewayTemplate, template, "LGW template configs should be equal")
@@ -1216,8 +1212,10 @@ func Test_buildClusterLBs(t *testing.T) {
 	namespace := "testns"
 
 	oldGwMode := globalconfig.Gateway.Mode
+	oldIPv4Mode := globalconfig.IPv4Mode
 	defer func() {
 		globalconfig.Gateway.Mode = oldGwMode
+		globalconfig.IPv4Mode = oldIPv4Mode
 	}()
 	globalconfig.Gateway.Mode = globalconfig.GatewayModeShared
@@ -1228,16 +1226,18 @@ func Test_buildClusterLBs(t *testing.T) {
 		},
 	}
 
-	defaultExternalIDs := map[string]string{
-		types.LoadBalancerKindExternalID:  "Service",
-		types.LoadBalancerOwnerExternalID: fmt.Sprintf("%s/%s", namespace, name),
-	}
-
 	defaultRouters := []string{}
 	defaultSwitches := []string{}
-	defaultGroups := []string{"clusterLBGroup"}
+	defaultGroups := []string{types.ClusterLBGroupName}
 	defaultOpts := LBOpts{Reject: true}
 
+	globalconfig.IPv4Mode = true
+	l3UDN, err := getSampleUDNNetInfo(namespace, "layer3")
+	assert.NoError(t, err)
+	l2UDN, err := getSampleUDNNetInfo(namespace, "layer2")
+	assert.NoError(t, err)
+	udnNets := []util.NetInfo{l3UDN, l2UDN}
+
 	tc := []struct {
 		name     string
 		service  *v1.Service
@@ -1285,7 +1285,7 @@
 				{
 					Name:        fmt.Sprintf("Service_%s/%s_TCP_cluster", namespace, name),
 					Protocol:    "TCP",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Rules: []LBRule{
 						{
 							Source: Addr{IP: "1.2.3.4", Port: 80},
@@ -1344,7 +1344,7 @@
 				{
 					Name:        fmt.Sprintf("Service_%s/%s_TCP_cluster", namespace, name),
 					Protocol:    "TCP",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Rules: []LBRule{
 						{
 							Source: Addr{IP: "1.2.3.4", Port: 80},
@@ -1360,7 +1360,7 @@
 				{
 					Name:        fmt.Sprintf("Service_%s/%s_UDP_cluster", namespace, name),
 					Protocol:    "UDP",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Rules: []LBRule{
 						{
 							Source: Addr{IP: "1.2.3.4", Port: 443},
@@ -1421,7 +1421,7 @@
 				{
 					Name:        fmt.Sprintf("Service_%s/%s_TCP_cluster", namespace, name),
 					Protocol:    "TCP",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Rules: []LBRule{
 						{
 							Source: Addr{IP: "1.2.3.4", Port: 80},
@@ -1451,8 +1451,24 @@
 	}
 	for i, tt := range tc {
 		t.Run(fmt.Sprintf("%d_%s", i, tt.name), func(t *testing.T) {
-			actual := buildClusterLBs(tt.service, tt.configs, tt.nodeInfos, true)
+
+			// default network
+			actual := buildClusterLBs(tt.service, tt.configs, tt.nodeInfos, true, &util.DefaultNetInfo{})
 			assert.Equal(t, tt.expected, actual)
+
+			// UDN
+			for _, udn := range udnNets {
+				UDNExternalIDs := loadBalancerExternalIDsForNetwork(namespacedServiceName(namespace, name), udn.GetNetworkName())
+				expected := make([]LB, len(tt.expected))
+				copy(expected, tt.expected)
+				for idx := range tt.expected {
+					expected[idx].ExternalIDs = UDNExternalIDs
+					expected[idx].Groups = []string{udn.GetNetworkScopedLoadBalancerGroupName(types.ClusterLBGroupName)}
+					expected[idx].Name = udn.GetNetworkScopedLoadBalancerName(tt.expected[idx].Name)
+				}
+				actual = buildClusterLBs(tt.service, tt.configs, tt.nodeInfos, true, udn)
+				assert.Equal(t, expected, actual)
+			}
 		})
 	}
 }
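Throughout these test hunks the literal `defaultExternalIDs` map gives way to `loadBalancerExternalIDs(...)`, and the UDN pass uses `loadBalancerExternalIDsForNetwork(...)`. A sketch of what those helpers plausibly produce — the kind/owner keys match the `k8s.ovn.org/owner` literal visible in the OCP-hack test above, while the network key is an assumption, not the confirmed implementation:

```go
package main

import "fmt"

func loadBalancerExternalIDs(namespacedService string) map[string]string {
	return map[string]string{
		"k8s.ovn.org/kind":  "Service",         // kind/owner keys appear elsewhere in this diff
		"k8s.ovn.org/owner": namespacedService, // e.g. "testns/foo"
	}
}

func loadBalancerExternalIDsForNetwork(namespacedService, network string) map[string]string {
	ids := loadBalancerExternalIDs(namespacedService)
	ids["k8s.ovn.org/network"] = network // hypothetical key naming the owning network
	return ids
}

func main() {
	fmt.Println(loadBalancerExternalIDsForNetwork("testns/foo", "tenant-red"))
}
```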
[]string{"switch-node-b"}, Protocol: "TCP", @@ -1643,7 +1664,7 @@ func Test_buildPerNodeLBs(t *testing.T) { expectedLocal: []LB{ { Name: "Service_testns/foo_TCP_node_router+switch_node-a", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Routers: []string{"gr-node-a"}, Switches: []string{"switch-node-a"}, Protocol: "TCP", @@ -1661,7 +1682,7 @@ func Test_buildPerNodeLBs(t *testing.T) { }, { Name: "Service_testns/foo_TCP_node_router+switch_node-b", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Routers: []string{"gr-node-b"}, Switches: []string{"switch-node-b"}, Protocol: "TCP", @@ -1713,7 +1734,7 @@ func Test_buildPerNodeLBs(t *testing.T) { expectedShared: []LB{ { Name: "Service_testns/foo_TCP_node_router_node-a", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Routers: []string{"gr-node-a"}, Protocol: "TCP", Rules: []LBRule{ @@ -1734,7 +1755,7 @@ func Test_buildPerNodeLBs(t *testing.T) { }, { Name: "Service_testns/foo_TCP_node_switch_node-a", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Switches: []string{"switch-node-a"}, Protocol: "TCP", Rules: []LBRule{ @@ -1755,7 +1776,7 @@ func Test_buildPerNodeLBs(t *testing.T) { }, { Name: "Service_testns/foo_TCP_node_router+switch_node-b", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Routers: []string{"gr-node-b"}, Switches: []string{"switch-node-b"}, Protocol: "TCP", @@ -1775,7 +1796,7 @@ func Test_buildPerNodeLBs(t *testing.T) { expectedLocal: []LB{ { Name: "Service_testns/foo_TCP_node_router_node-a", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Routers: []string{"gr-node-a"}, Protocol: "TCP", Rules: []LBRule{ @@ -1796,7 +1817,7 @@ func Test_buildPerNodeLBs(t *testing.T) { }, { Name: "Service_testns/foo_TCP_node_switch_node-a", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Switches: []string{"switch-node-a"}, Protocol: "TCP", Rules: []LBRule{ @@ -1817,7 +1838,7 @@ func Test_buildPerNodeLBs(t *testing.T) { }, { Name: "Service_testns/foo_TCP_node_router+switch_node-b", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Routers: []string{"gr-node-b"}, Switches: []string{"switch-node-b"}, Protocol: "TCP", @@ -1880,7 +1901,7 @@ func Test_buildPerNodeLBs(t *testing.T) { // switch clusterip + nodeport { Name: "Service_testns/foo_TCP_node_router_node-a", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Routers: []string{"gr-node-a"}, Protocol: "TCP", Rules: []LBRule{ @@ -1893,7 +1914,7 @@ func Test_buildPerNodeLBs(t *testing.T) { }, { Name: "Service_testns/foo_TCP_node_local_router_node-a", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Routers: []string{"gr-node-a"}, Opts: LBOpts{SkipSNAT: true, Reject: true}, Protocol: "TCP", @@ -1910,7 +1931,7 @@ func Test_buildPerNodeLBs(t *testing.T) { }, { Name: "Service_testns/foo_TCP_node_switch_node-a", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Switches: 
[]string{"switch-node-a"}, Protocol: "TCP", Rules: []LBRule{ @@ -1944,7 +1965,7 @@ func Test_buildPerNodeLBs(t *testing.T) { // switch clusterip + nodeport { Name: "Service_testns/foo_TCP_node_router_node-b", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Routers: []string{"gr-node-b"}, Protocol: "TCP", Rules: []LBRule{ @@ -1961,7 +1982,7 @@ func Test_buildPerNodeLBs(t *testing.T) { }, { Name: "Service_testns/foo_TCP_node_switch_node-b", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Switches: []string{"switch-node-b"}, Protocol: "TCP", Rules: []LBRule{ @@ -2029,7 +2050,7 @@ func Test_buildPerNodeLBs(t *testing.T) { expectedShared: []LB{ { Name: "Service_testns/foo_TCP_node_router_node-a_merged", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Routers: []string{"gr-node-a", "gr-node-b"}, Protocol: "TCP", Rules: []LBRule{ @@ -2046,7 +2067,7 @@ func Test_buildPerNodeLBs(t *testing.T) { }, { Name: "Service_testns/foo_TCP_node_switch_node-a", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Switches: []string{"switch-node-a"}, Protocol: "TCP", Rules: []LBRule{ @@ -2063,7 +2084,7 @@ func Test_buildPerNodeLBs(t *testing.T) { }, { Name: "Service_testns/foo_TCP_node_switch_node-b", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Switches: []string{"switch-node-b"}, Protocol: "TCP", Rules: []LBRule{ @@ -2082,7 +2103,7 @@ func Test_buildPerNodeLBs(t *testing.T) { expectedLocal: []LB{ { Name: "Service_testns/foo_TCP_node_router_node-a_merged", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Routers: []string{"gr-node-a", "gr-node-b"}, Protocol: "TCP", Rules: []LBRule{ @@ -2099,7 +2120,7 @@ func Test_buildPerNodeLBs(t *testing.T) { }, { Name: "Service_testns/foo_TCP_node_switch_node-a", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Switches: []string{"switch-node-a"}, Protocol: "TCP", Rules: []LBRule{ @@ -2116,7 +2137,7 @@ func Test_buildPerNodeLBs(t *testing.T) { }, { Name: "Service_testns/foo_TCP_node_switch_node-b", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Switches: []string{"switch-node-b"}, Protocol: "TCP", Rules: []LBRule{ @@ -2180,7 +2201,7 @@ func Test_buildPerNodeLBs(t *testing.T) { expectedShared: []LB{ { Name: "Service_testns/foo_TCP_node_router_node-a", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Routers: []string{"gr-node-a"}, Protocol: "TCP", Rules: []LBRule{ @@ -2197,7 +2218,7 @@ func Test_buildPerNodeLBs(t *testing.T) { }, { Name: "Service_testns/foo_TCP_node_switch_node-a", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Switches: []string{"switch-node-a"}, Protocol: "TCP", Rules: []LBRule{ @@ -2214,7 +2235,7 @@ func Test_buildPerNodeLBs(t *testing.T) { }, { Name: "Service_testns/foo_TCP_node_router_node-b", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Routers: []string{"gr-node-b"}, Protocol: "TCP", Rules: []LBRule{ @@ -2231,7 +2252,7 @@ func 
@@ -2029,7 +2050,7 @@
 			expectedShared: []LB{
 				{
 					Name:        "Service_testns/foo_TCP_node_router_node-a_merged",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Routers:     []string{"gr-node-a", "gr-node-b"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2046,7 +2067,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_switch_node-a",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Switches:    []string{"switch-node-a"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2063,7 +2084,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_switch_node-b",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Switches:    []string{"switch-node-b"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2082,7 +2103,7 @@
 			expectedLocal: []LB{
 				{
 					Name:        "Service_testns/foo_TCP_node_router_node-a_merged",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Routers:     []string{"gr-node-a", "gr-node-b"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2099,7 +2120,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_switch_node-a",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Switches:    []string{"switch-node-a"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2116,7 +2137,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_switch_node-b",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Switches:    []string{"switch-node-b"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2180,7 +2201,7 @@
 			expectedShared: []LB{
 				{
 					Name:        "Service_testns/foo_TCP_node_router_node-a",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Routers:     []string{"gr-node-a"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2197,7 +2218,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_switch_node-a",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Switches:    []string{"switch-node-a"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2214,7 +2235,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_router_node-b",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Routers:     []string{"gr-node-b"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2231,7 +2252,7 @@ func Test_buildPerNodeLBs(t *testing.T) {
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_switch_node-b",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Switches:    []string{"switch-node-b"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2250,7 +2271,7 @@
 			expectedLocal: []LB{
 				{
 					Name:        "Service_testns/foo_TCP_node_router_node-a",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Routers:     []string{"gr-node-a"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2267,7 +2288,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_switch_node-a",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Switches:    []string{"switch-node-a"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2284,7 +2305,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_router_node-b",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Routers:     []string{"gr-node-b"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2301,7 +2322,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_switch_node-b",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Switches:    []string{"switch-node-b"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2362,7 +2383,7 @@
 			expectedShared: []LB{
 				{
 					Name:        "Service_testns/foo_TCP_node_router_node-a",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Routers:     []string{"gr-node-a"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2375,7 +2396,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_local_router_node-a",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Routers:     []string{"gr-node-a"},
 					Opts:        LBOpts{SkipSNAT: true, Reject: true},
 					Protocol:    "TCP",
@@ -2392,7 +2413,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_switch_node-a",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Switches:    []string{"switch-node-a"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2421,7 +2442,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_router_node-b",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Routers:     []string{"gr-node-b"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2438,7 +2459,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_switch_node-b",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Switches:    []string{"switch-node-b"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2461,7 +2482,7 @@
 			expectedLocal: []LB{
 				{
 					Name:        "Service_testns/foo_TCP_node_router_node-a",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Routers:     []string{"gr-node-a"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2474,7 +2495,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_local_router_node-a",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Routers:     []string{"gr-node-a"},
 					Opts:        LBOpts{SkipSNAT: true, Reject: true},
 					Protocol:    "TCP",
@@ -2491,7 +2512,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_switch_node-a",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Switches:    []string{"switch-node-a"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2520,7 +2541,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_router_node-b",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Routers:     []string{"gr-node-b"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2537,7 +2558,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_switch_node-b",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Switches:    []string{"switch-node-b"},
 					Protocol:    "TCP",
 					Rules: []LBRule{
@@ -2610,7 +2631,7 @@
 				{
 					Name:        "Service_testns/foo_TCP_node_local_router_node-a",
 					Protocol:    "TCP",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Opts:        LBOpts{SkipSNAT: true, Reject: true},
 
 					Rules: []LBRule{
@@ -2636,7 +2657,7 @@
 				{
 					Name:        "Service_testns/foo_TCP_node_switch_node-a",
 					Protocol:    "TCP",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Opts:        defaultOpts,
 					Rules: []LBRule{
 						{
@@ -2669,7 +2690,7 @@
 				{
 					Name:        "Service_testns/foo_TCP_node_local_router_node-b",
 					Protocol:    "TCP",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Opts:        LBOpts{SkipSNAT: true, Reject: true},
 					Rules: []LBRule{
 						{
@@ -2690,7 +2711,7 @@
 				{
 					Name:        "Service_testns/foo_TCP_node_switch_node-b",
 					Protocol:    "TCP",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Opts:        defaultOpts,
 					Rules: []LBRule{
 						{
@@ -2753,7 +2774,7 @@
 				{
 					Name:        "Service_testns/foo_TCP_node_local_router_node-a",
 					Protocol:    "TCP",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Opts:        LBOpts{SkipSNAT: true, Reject: true},
 
 					Rules: []LBRule{
@@ -2779,7 +2800,7 @@
 				{
 					Name:        "Service_testns/foo_TCP_node_switch_node-a",
 					Protocol:    "TCP",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Opts:        defaultOpts,
 					Rules: []LBRule{
 						{
@@ -2812,7 +2833,7 @@
 				{
 					Name:        "Service_testns/foo_TCP_node_local_router_node-b",
 					Protocol:    "TCP",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Opts:        LBOpts{SkipSNAT: true, Reject: true},
 					Rules: []LBRule{
 						{
@@ -2833,7 +2854,7 @@
 				{
 					Name:        "Service_testns/foo_TCP_node_switch_node-b",
 					Protocol:    "TCP",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Opts:        defaultOpts,
 					Rules: []LBRule{
 						{
@@ -2888,7 +2909,7 @@ func Test_buildPerNodeLBs(t *testing.T) {
 			expectedShared: []LB{
 				{
 					Name:        "Service_testns/foo_TCP_node_router+switch_node-a",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Routers:     []string{"gr-node-a"},
 					Switches:    []string{"switch-node-a"},
 					Protocol:    "TCP",
@@ -2906,7 +2927,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_router+switch_node-b",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Routers:     []string{"gr-node-b"},
 					Switches:    []string{"switch-node-b"},
 					Protocol:    "TCP",
@@ -2922,7 +2943,7 @@
 			expectedLocal: []LB{
 				{
 					Name:        "Service_testns/foo_TCP_node_router+switch_node-a",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Routers:     []string{"gr-node-a"},
 					Switches:    []string{"switch-node-a"},
 					Protocol:    "TCP",
@@ -2940,7 +2961,7 @@
 				},
 				{
 					Name:        "Service_testns/foo_TCP_node_router+switch_node-b",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Routers:     []string{"gr-node-b"},
 					Switches:    []string{"switch-node-b"},
 					Protocol:    "TCP",
@@ -2994,7 +3015,7 @@
 				{
 					Name:        "Service_testns/foo_TCP_node_local_router_node-a",
 					Protocol:    "TCP",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Opts:        LBOpts{SkipSNAT: true, Reject: true},
 
 					Rules: []LBRule{
@@ -3020,7 +3041,7 @@
 				{
 					Name:        "Service_testns/foo_TCP_node_switch_node-a",
 					Protocol:    "TCP",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Opts:        defaultOpts,
 					Rules: []LBRule{
 						{
@@ -3053,7 +3074,7 @@
 				{
 					Name:        "Service_testns/foo_TCP_node_local_router_node-b",
 					Protocol:    "TCP",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Opts:        LBOpts{SkipSNAT: true, Reject: true},
 					Rules: []LBRule{
 						{
@@ -3074,7 +3095,7 @@
 				{
 					Name:        "Service_testns/foo_TCP_node_switch_node-b",
 					Protocol:    "TCP",
-					ExternalIDs: defaultExternalIDs,
+					ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)),
 					Opts:        defaultOpts,
 					Rules: []LBRule{
 						{
@@ -3099,20 +3120,47 @@
 			},
 		},
 	}
+	// v4
 	for i, tt := range tc {
 		t.Run(fmt.Sprintf("%d_%s", i, tt.name), func(t *testing.T) {
-
 			if tt.expectedShared != nil {
 				globalconfig.Gateway.Mode = globalconfig.GatewayModeShared
-				actual := buildPerNodeLBs(tt.service, tt.configs, defaultNodes)
+				// cluster default network
+				actual := buildPerNodeLBs(tt.service, tt.configs, defaultNodes, &util.DefaultNetInfo{})
 				assert.Equal(t, tt.expectedShared, actual, "shared gateway mode not as expected")
+
+				// UDN
+				for _, udn := range udnNetworks {
+					expectedShared := make([]LB, len(tt.expectedShared))
+					copy(expectedShared, tt.expectedShared)
+					for idx := range tt.expectedShared {
+						expectedShared[idx].ExternalIDs = loadBalancerExternalIDsForNetwork(namespacedServiceName(namespace, name), udn.GetNetworkName())
+						expectedShared[idx].Name = udn.GetNetworkScopedLoadBalancerName(tt.expectedShared[idx].Name)
+					}
+					actual = buildPerNodeLBs(tt.service, tt.configs, defaultNodes, udn)
+					assert.Equal(t, expectedShared, actual, "shared gateway mode not as expected")
+				}
 			}
 
 			if tt.expectedLocal != nil {
 				globalconfig.Gateway.Mode = globalconfig.GatewayModeLocal
-				actual := buildPerNodeLBs(tt.service, tt.configs, defaultNodes)
+
+				// cluster default network
+				actual := buildPerNodeLBs(tt.service, tt.configs, defaultNodes, &util.DefaultNetInfo{})
 				assert.Equal(t, tt.expectedLocal, actual, "local gateway mode not as expected")
+
+				// UDN
+				for _, udn := range udnNetworks {
+					expectedLocal := make([]LB, len(tt.expectedLocal))
+					copy(expectedLocal, tt.expectedLocal)
+					for idx := range tt.expectedLocal {
+						expectedLocal[idx].ExternalIDs = loadBalancerExternalIDsForNetwork(namespacedServiceName(namespace, name), udn.GetNetworkName())
+						expectedLocal[idx].Name = udn.GetNetworkScopedLoadBalancerName(tt.expectedLocal[idx].Name)
+					}
+					actual = buildPerNodeLBs(tt.service, tt.configs, defaultNodes, udn)
+					assert.Equal(t, expectedLocal, actual, "local gateway mode not as expected")
+				}
 			}
 		})
@@ -3125,14 +3173,42 @@ func Test_buildPerNodeLBs(t *testing.T) {
 			if tt.expectedShared != nil {
 				globalconfig.Gateway.Mode = globalconfig.GatewayModeShared
-				actual := buildPerNodeLBs(tt.service, tt.configs, defaultNodesV6)
+
+				// cluster default network
+				actual := buildPerNodeLBs(tt.service, tt.configs, defaultNodesV6, &util.DefaultNetInfo{})
 				assert.Equal(t, tt.expectedShared, actual, "shared gateway mode not as expected")
+
+				// UDN
+				for _, udn := range udnNetworks {
+					expectedShared := make([]LB, len(tt.expectedShared))
+					copy(expectedShared, tt.expectedShared)
+					for idx := range tt.expectedShared {
+						expectedShared[idx].ExternalIDs = loadBalancerExternalIDsForNetwork(namespacedServiceName(namespace, name), udn.GetNetworkName())
+						expectedShared[idx].Name = udn.GetNetworkScopedLoadBalancerName(tt.expectedShared[idx].Name)
+					}
+					actual = buildPerNodeLBs(tt.service, tt.configs, defaultNodesV6, udn)
+					assert.Equal(t, expectedShared, actual, "shared gateway mode not as expected for UDN")
+				}
 			}
 
 			if tt.expectedLocal != nil {
 				globalconfig.Gateway.Mode = globalconfig.GatewayModeLocal
-				actual := buildPerNodeLBs(tt.service, tt.configs, defaultNodesV6)
+
+				// cluster default network
+				actual := buildPerNodeLBs(tt.service, tt.configs, defaultNodesV6, &util.DefaultNetInfo{})
 				assert.Equal(t, tt.expectedLocal, actual, "local gateway mode not as expected")
+
+				// UDN
+				for _, udn := range udnNetworks {
+					expectedLocal := make([]LB, len(tt.expectedLocal))
+					copy(expectedLocal, tt.expectedLocal)
+					for idx := range tt.expectedLocal {
+						expectedLocal[idx].ExternalIDs = loadBalancerExternalIDsForNetwork(namespacedServiceName(namespace, name), udn.GetNetworkName())
+						expectedLocal[idx].Name = udn.GetNetworkScopedLoadBalancerName(tt.expectedLocal[idx].Name)
+					}
+					actual = buildPerNodeLBs(tt.service, tt.configs, defaultNodesV6, udn)
+					assert.Equal(t, expectedLocal, actual, "local gateway mode not as expected for UDN")
+				}
 			}
 		})
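Unlike the OCP-hack tests, these run loops copy the expected slice before rewriting it per UDN, so the default-network expectations survive the loop. Note that `copy` on a `[]LB` is shallow; the pattern is safe only because the map field is replaced rather than written into, as this self-contained illustration (with a pared-down `LB` and illustrative keys) shows:

```go
package main

import "fmt"

type LB struct {
	Name        string
	ExternalIDs map[string]string
}

func main() {
	defaults := []LB{{
		Name:        "Service_testns/foo_TCP_node_router_node-a",
		ExternalIDs: map[string]string{"k8s.ovn.org/owner": "testns/foo"},
	}}

	scoped := make([]LB, len(defaults))
	copy(scoped, defaults) // copies struct values: Name is now independent...
	scoped[0].Name = "tenant-red_" + scoped[0].Name

	// ...but ExternalIDs still points at the same map. Writing into it here
	// would also mutate defaults[0]; reassigning it wholesale, as the tests
	// do, leaves the original untouched.
	scoped[0].ExternalIDs = map[string]string{
		"k8s.ovn.org/owner":   "testns/foo",
		"k8s.ovn.org/network": "tenant-red", // hypothetical network key
	}

	fmt.Println(defaults[0].Name)        // unchanged
	fmt.Println(defaults[0].ExternalIDs) // unchanged
}
```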
@@ -3229,7 +3305,7 @@ func Test_getEndpointsForService(t *testing.T) {
 		name: "empty slices",
 		args: args{
 			slices: []*discovery.EndpointSlice{},
-			svc:    getSampleServiceWithOnePort(httpPortName, httpPortValue, tcpv1),
+			svc:    getSampleServiceWithOnePort(httpPortName, httpPortValue, tcp),
 		},
 		wantClusterEndpoints: map[string]lbEndpoints{},            // no cluster-wide endpoints
 		wantNodeEndpoints:    map[string]map[string]lbEndpoints{}, // no local endpoints
@@ -3247,19 +3323,19 @@ func Test_getEndpointsForService(t *testing.T) {
 					Ports: []discovery.EndpointPort{
 						{
 							Name:     ptr.To("tcp-example"),
-							Protocol: &tcpv1,
+							Protocol: &tcp,
 							Port:     ptr.To(int32(80)),
 						},
 					},
 					AddressType: discovery.AddressTypeIPv4,
-					Endpoints:   kube_test.MakeReadyEndpointList(nodeA, "10.0.0.2"),
+					Endpoints:   kubetest.MakeReadyEndpointList(nodeA, "10.0.0.2"),
 				},
 			},
-			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcpv1),
+			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcp),
 			nodes: sets.New(nodeA), // one-node zone
 		},
 		wantClusterEndpoints: map[string]lbEndpoints{
-			getServicePortKey(tcpv1, "tcp-example"): {V4IPs: []string{"10.0.0.2"}, Port: 80}}, // one cluster-wide endpoint
+			getServicePortKey(tcp, "tcp-example"): {V4IPs: []string{"10.0.0.2"}, Port: 80}}, // one cluster-wide endpoint
 		wantNodeEndpoints: map[string]map[string]lbEndpoints{}, // no need for local endpoints, service is not ETP or ITP local
 	},
 	{
@@ -3275,21 +3351,21 @@ func Test_getEndpointsForService(t *testing.T) {
 					Ports: []discovery.EndpointPort{
 						{
 							Name:     ptr.To("tcp-example"),
-							Protocol: &tcpv1,
+							Protocol: &tcp,
 							Port:     ptr.To(int32(80)),
 						},
 					},
 					AddressType: discovery.AddressTypeIPv4,
-					Endpoints:   kube_test.MakeReadyEndpointList(nodeA, "10.0.0.2"),
+					Endpoints:   kubetest.MakeReadyEndpointList(nodeA, "10.0.0.2"),
 				},
 			},
-			svc:   getSampleServiceWithOnePortAndETPLocal("tcp-example", 80, tcpv1),
+			svc:   getSampleServiceWithOnePortAndETPLocal("tcp-example", 80, tcp),
 			nodes: sets.New(nodeA), // one-node zone
 		},
 		wantClusterEndpoints: map[string]lbEndpoints{
-			getServicePortKey(tcpv1, "tcp-example"): {V4IPs: []string{"10.0.0.2"}, Port: 80}}, // one cluster-wide endpoint
+			getServicePortKey(tcp, "tcp-example"): {V4IPs: []string{"10.0.0.2"}, Port: 80}}, // one cluster-wide endpoint
 		wantNodeEndpoints: map[string]map[string]lbEndpoints{
-			getServicePortKey(tcpv1, "tcp-example"): {nodeA: lbEndpoints{V4IPs: []string{"10.0.0.2"}, Port: 80}}}, // ETP=local, one local endpoint
+			getServicePortKey(tcp, "tcp-example"): {nodeA: lbEndpoints{V4IPs: []string{"10.0.0.2"}, Port: 80}}}, // ETP=local, one local endpoint
 	},
 	{
 		name: "slice with one non-local endpoint, ETP=local",
@@ -3304,19 +3380,19 @@ func Test_getEndpointsForService(t *testing.T) {
 					Ports: []discovery.EndpointPort{
 						{
 							Name:     ptr.To("tcp-example"),
-							Protocol: &tcpv1,
+							Protocol: &tcp,
 							Port:     ptr.To(int32(80)),
 						},
 					},
 					AddressType: discovery.AddressTypeIPv4,
-					Endpoints:   kube_test.MakeReadyEndpointList(nodeB, "10.0.0.2"),
+					Endpoints:   kubetest.MakeReadyEndpointList(nodeB, "10.0.0.2"),
 				},
 			},
-			svc:   getSampleServiceWithOnePortAndETPLocal("tcp-example", 80, tcpv1),
+			svc:   getSampleServiceWithOnePortAndETPLocal("tcp-example", 80, tcp),
 			nodes: sets.New(nodeA), // one-node zone
 		},
 		wantClusterEndpoints: map[string]lbEndpoints{
-			getServicePortKey(tcpv1, "tcp-example"): {V4IPs: []string{"10.0.0.2"}, Port: 80}}, // one cluster-wide endpoint
+			getServicePortKey(tcp, "tcp-example"): {V4IPs: []string{"10.0.0.2"}, Port: 80}}, // one cluster-wide endpoint
 		wantNodeEndpoints: map[string]map[string]lbEndpoints{}, // ETP=local but no local endpoint
 	},
 	{
@@ -3332,15 +3408,15 @@ func Test_getEndpointsForService(t *testing.T) {
 					Ports: []discovery.EndpointPort{
 						{
 							Name:     ptr.To("tcp-example"),
-							Protocol: &tcpv1,
+							Protocol: &tcp,
 							Port:     ptr.To(int32(80)),
 						},
 					},
 					AddressType: discovery.AddressTypeFQDN,
-					Endpoints:   kube_test.MakeReadyEndpointList(nodeA, "example.com"),
+					Endpoints:   kubetest.MakeReadyEndpointList(nodeA, "example.com"),
 				},
 			},
-			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcpv1),
+			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcp),
 			nodes: sets.New(nodeA), // one-node zone
 		},
 		wantClusterEndpoints: map[string]lbEndpoints{}, // no endpoints
@@ -3359,21 +3435,21 @@ func Test_getEndpointsForService(t *testing.T) {
 					Ports: []discovery.EndpointPort{
 						{
 							Name:     ptr.To("tcp-example"),
-							Protocol: &tcpv1,
+							Protocol: &tcp,
 							Port:     ptr.To(int32(80)),
 						},
 					},
 					AddressType: discovery.AddressTypeIPv4,
-					Endpoints:   kube_test.MakeReadyEndpointList(nodeB, "10.0.0.2"),
+					Endpoints:   kubetest.MakeReadyEndpointList(nodeB, "10.0.0.2"),
 				},
 			},
-			svc:   getSampleServiceWithOnePortAndETPLocal("tcp-example", 80, tcpv1),
+			svc:   getSampleServiceWithOnePortAndETPLocal("tcp-example", 80, tcp),
 			nodes: sets.New(nodeA, nodeB), // zone with two nodes
 		},
 		wantClusterEndpoints: map[string]lbEndpoints{
-			getServicePortKey(tcpv1, "tcp-example"): {V4IPs: []string{"10.0.0.2"}, Port: 80}}, // one cluster-wide endpoint
+			getServicePortKey(tcp, "tcp-example"): {V4IPs: []string{"10.0.0.2"}, Port: 80}}, // one cluster-wide endpoint
 		wantNodeEndpoints: map[string]map[string]lbEndpoints{
-			getServicePortKey(tcpv1, "tcp-example"): {nodeB: lbEndpoints{V4IPs: []string{"10.0.0.2"}, Port: 80}}}, // endpoint on nodeB
+			getServicePortKey(tcp, "tcp-example"): {nodeB: lbEndpoints{V4IPs: []string{"10.0.0.2"}, Port: 80}}}, // endpoint on nodeB
 	},
 	{
 		name: "slice with different port name than the service",
@@ -3393,10 +3469,10 @@ func Test_getEndpointsForService(t *testing.T) {
 						},
 					},
 					AddressType: discovery.AddressTypeIPv4,
-					Endpoints:   kube_test.MakeReadyEndpointList(nodeA, "10.0.0.2"),
+					Endpoints:   kubetest.MakeReadyEndpointList(nodeA, "10.0.0.2"),
 				},
 			},
-			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcpv1),
+			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcp),
 			nodes: sets.New(nodeA), // one-node zone
 		},
 		wantClusterEndpoints: map[string]lbEndpoints{}, // no cluster-wide endpoints
@@ -3414,22 +3490,22 @@ func Test_getEndpointsForService(t *testing.T) {
 					},
 					Ports: []discovery.EndpointPort{
 						{
-							Protocol: &tcpv1,
+							Protocol: &tcp,
 							Port:     ptr.To(int32(8080)),
 						},
 					},
 					AddressType: discovery.AddressTypeIPv4,
-					Endpoints:   kube_test.MakeReadyEndpointList(nodeA, "10.0.0.2"),
+					Endpoints:   kubetest.MakeReadyEndpointList(nodeA, "10.0.0.2"),
 				},
 			},
-			svc:   getSampleServiceWithOnePortAndETPLocal("", 80, tcpv1), // port with no name
-			nodes: sets.New(nodeA),                                       // one-node zone
+			svc:   getSampleServiceWithOnePortAndETPLocal("", 80, tcp), // port with no name
+			nodes: sets.New(nodeA),                                     // one-node zone
 		},
 		wantClusterEndpoints: map[string]lbEndpoints{
-			getServicePortKey(tcpv1, ""): {V4IPs: []string{"10.0.0.2"}, Port: 8080}}, // one cluster-wide endpoint
+			getServicePortKey(tcp, ""): {V4IPs: []string{"10.0.0.2"}, Port: 8080}}, // one cluster-wide endpoint
 		wantNodeEndpoints: map[string]map[string]lbEndpoints{
-			getServicePortKey(tcpv1, ""): {nodeA: lbEndpoints{V4IPs: []string{"10.0.0.2"}, Port: 8080}}}, // one local endpoint
+			getServicePortKey(tcp, ""): {nodeA: lbEndpoints{V4IPs: []string{"10.0.0.2"}, Port: 8080}}}, // one local endpoint
 	},
 	{
 		name: "slice with an IPv6 endpoint",
@@ -3449,14 +3525,14 @@ func Test_getEndpointsForService(t *testing.T) {
 						},
 					},
 					AddressType: discovery.AddressTypeIPv6,
-					Endpoints:   kube_test.MakeReadyEndpointList(nodeA, "2001:db2::2"),
+					Endpoints:   kubetest.MakeReadyEndpointList(nodeA, "2001:db2::2"),
 				},
 			},
-			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcpv1),
+			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcp),
 			nodes: sets.New(nodeA), // one-node zone
 		},
 		wantClusterEndpoints: map[string]lbEndpoints{
-			getServicePortKey(tcpv1, "tcp-example"): {V6IPs: []string{"2001:db2::2"}, Port: 80}}, // one cluster-wide endpoint
+			getServicePortKey(tcp, "tcp-example"): {V6IPs: []string{"2001:db2::2"}, Port: 80}}, // one cluster-wide endpoint
 		wantNodeEndpoints: map[string]map[string]lbEndpoints{}, // local endpoints not filled in, since service is not ETP or ITP local
 	},
"10.0.0.2", "10.1.1.2"), }, { ObjectMeta: metav1.ObjectMeta{ @@ -3594,14 +3670,14 @@ func Test_getEndpointsForService(t *testing.T) { }, }, AddressType: discovery.AddressTypeIPv4, - Endpoints: kube_test.MakeReadyEndpointList(nodeA, "10.0.0.2", "10.2.2.2"), + Endpoints: kubetest.MakeReadyEndpointList(nodeA, "10.0.0.2", "10.2.2.2"), }, }, - svc: getSampleServiceWithOnePort("tcp-example", 80, tcpv1), + svc: getSampleServiceWithOnePort("tcp-example", 80, tcp), nodes: sets.New(nodeA), // one-node zone }, wantClusterEndpoints: map[string]lbEndpoints{ - getServicePortKey(tcpv1, "tcp-example"): {V4IPs: []string{"10.0.0.2", "10.1.1.2", "10.2.2.2"}, Port: 80}}, + getServicePortKey(tcp, "tcp-example"): {V4IPs: []string{"10.0.0.2", "10.1.1.2", "10.2.2.2"}, Port: 80}}, wantNodeEndpoints: map[string]map[string]lbEndpoints{}, // local endpoints not filled in, since service is not ETP or ITP local }, { @@ -3622,7 +3698,7 @@ func Test_getEndpointsForService(t *testing.T) { }, }, AddressType: discovery.AddressTypeIPv4, - Endpoints: kube_test.MakeReadyEndpointList(nodeA, "10.0.0.2", "10.1.1.2"), + Endpoints: kubetest.MakeReadyEndpointList(nodeA, "10.0.0.2", "10.1.1.2"), }, { ObjectMeta: metav1.ObjectMeta{ @@ -3638,18 +3714,18 @@ func Test_getEndpointsForService(t *testing.T) { }, }, AddressType: discovery.AddressTypeIPv4, - Endpoints: kube_test.MakeReadyEndpointList(nodeA, "10.0.0.3", "10.2.2.3"), + Endpoints: kubetest.MakeReadyEndpointList(nodeA, "10.0.0.3", "10.2.2.3"), }, }, - svc: getSampleServiceWithTwoPortsAndETPLocal("tcp-example", "other-port", 80, 8080, tcpv1, tcpv1), + svc: getSampleServiceWithTwoPortsAndETPLocal("tcp-example", "other-port", 80, 8080, tcp, tcp), nodes: sets.New(nodeA), // one-node zone }, wantClusterEndpoints: map[string]lbEndpoints{ - getServicePortKey(tcpv1, "tcp-example"): {V4IPs: []string{"10.0.0.2", "10.1.1.2"}, Port: 80}, - getServicePortKey(tcpv1, "other-port"): {V4IPs: []string{"10.0.0.3", "10.2.2.3"}, Port: 8080}}, + getServicePortKey(tcp, "tcp-example"): {V4IPs: []string{"10.0.0.2", "10.1.1.2"}, Port: 80}, + getServicePortKey(tcp, "other-port"): {V4IPs: []string{"10.0.0.3", "10.2.2.3"}, Port: 8080}}, wantNodeEndpoints: map[string]map[string]lbEndpoints{ - getServicePortKey(tcpv1, "tcp-example"): {nodeA: lbEndpoints{V4IPs: []string{"10.0.0.2", "10.1.1.2"}, Port: 80}}, - getServicePortKey(tcpv1, "other-port"): {nodeA: lbEndpoints{V4IPs: []string{"10.0.0.3", "10.2.2.3"}, Port: 8080}}}, + getServicePortKey(tcp, "tcp-example"): {nodeA: lbEndpoints{V4IPs: []string{"10.0.0.2", "10.1.1.2"}, Port: 80}}, + getServicePortKey(tcp, "other-port"): {nodeA: lbEndpoints{V4IPs: []string{"10.0.0.3", "10.2.2.3"}, Port: 8080}}}, }, { name: "multiples slices with different ports, OVN zone with two nodes, ETP=local", @@ -3669,7 +3745,7 @@ func Test_getEndpointsForService(t *testing.T) { }, }, AddressType: discovery.AddressTypeIPv4, - Endpoints: kube_test.MakeReadyEndpointList(nodeA, "10.0.0.2", "10.1.1.2"), + Endpoints: kubetest.MakeReadyEndpointList(nodeA, "10.0.0.2", "10.1.1.2"), }, { ObjectMeta: metav1.ObjectMeta{ @@ -3685,18 +3761,18 @@ func Test_getEndpointsForService(t *testing.T) { }, }, AddressType: discovery.AddressTypeIPv4, - Endpoints: kube_test.MakeReadyEndpointList(nodeB, "10.0.0.3", "10.2.2.3"), + Endpoints: kubetest.MakeReadyEndpointList(nodeB, "10.0.0.3", "10.2.2.3"), }, }, - svc: getSampleServiceWithTwoPortsAndETPLocal("tcp-example", "other-port", 80, 8080, tcpv1, tcpv1), + svc: getSampleServiceWithTwoPortsAndETPLocal("tcp-example", "other-port", 80, 8080, tcp, tcp), nodes: 
 	{
@@ -3578,7 +3654,7 @@ func Test_getEndpointsForService(t *testing.T) {
 						},
 					},
 					AddressType: discovery.AddressTypeIPv4,
-					Endpoints:   kube_test.MakeReadyEndpointList(nodeA, "10.0.0.2", "10.1.1.2"),
+					Endpoints:   kubetest.MakeReadyEndpointList(nodeA, "10.0.0.2", "10.1.1.2"),
 				},
 				{
 					ObjectMeta: metav1.ObjectMeta{
@@ -3594,14 +3670,14 @@ func Test_getEndpointsForService(t *testing.T) {
 						},
 					},
 					AddressType: discovery.AddressTypeIPv4,
-					Endpoints:   kube_test.MakeReadyEndpointList(nodeA, "10.0.0.2", "10.2.2.2"),
+					Endpoints:   kubetest.MakeReadyEndpointList(nodeA, "10.0.0.2", "10.2.2.2"),
 				},
 			},
-			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcpv1),
+			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcp),
 			nodes: sets.New(nodeA), // one-node zone
 		},
 		wantClusterEndpoints: map[string]lbEndpoints{
-			getServicePortKey(tcpv1, "tcp-example"): {V4IPs: []string{"10.0.0.2", "10.1.1.2", "10.2.2.2"}, Port: 80}},
+			getServicePortKey(tcp, "tcp-example"): {V4IPs: []string{"10.0.0.2", "10.1.1.2", "10.2.2.2"}, Port: 80}},
 		wantNodeEndpoints: map[string]map[string]lbEndpoints{}, // local endpoints not filled in, since service is not ETP or ITP local
 	},
 	{
@@ -3622,7 +3698,7 @@ func Test_getEndpointsForService(t *testing.T) {
 						},
 					},
 					AddressType: discovery.AddressTypeIPv4,
-					Endpoints:   kube_test.MakeReadyEndpointList(nodeA, "10.0.0.2", "10.1.1.2"),
+					Endpoints:   kubetest.MakeReadyEndpointList(nodeA, "10.0.0.2", "10.1.1.2"),
 				},
 				{
 					ObjectMeta: metav1.ObjectMeta{
@@ -3638,18 +3714,18 @@ func Test_getEndpointsForService(t *testing.T) {
 						},
 					},
 					AddressType: discovery.AddressTypeIPv4,
-					Endpoints:   kube_test.MakeReadyEndpointList(nodeA, "10.0.0.3", "10.2.2.3"),
+					Endpoints:   kubetest.MakeReadyEndpointList(nodeA, "10.0.0.3", "10.2.2.3"),
 				},
 			},
-			svc:   getSampleServiceWithTwoPortsAndETPLocal("tcp-example", "other-port", 80, 8080, tcpv1, tcpv1),
+			svc:   getSampleServiceWithTwoPortsAndETPLocal("tcp-example", "other-port", 80, 8080, tcp, tcp),
 			nodes: sets.New(nodeA), // one-node zone
 		},
 		wantClusterEndpoints: map[string]lbEndpoints{
-			getServicePortKey(tcpv1, "tcp-example"): {V4IPs: []string{"10.0.0.2", "10.1.1.2"}, Port: 80},
-			getServicePortKey(tcpv1, "other-port"):  {V4IPs: []string{"10.0.0.3", "10.2.2.3"}, Port: 8080}},
+			getServicePortKey(tcp, "tcp-example"): {V4IPs: []string{"10.0.0.2", "10.1.1.2"}, Port: 80},
+			getServicePortKey(tcp, "other-port"):  {V4IPs: []string{"10.0.0.3", "10.2.2.3"}, Port: 8080}},
 		wantNodeEndpoints: map[string]map[string]lbEndpoints{
-			getServicePortKey(tcpv1, "tcp-example"): {nodeA: lbEndpoints{V4IPs: []string{"10.0.0.2", "10.1.1.2"}, Port: 80}},
-			getServicePortKey(tcpv1, "other-port"):  {nodeA: lbEndpoints{V4IPs: []string{"10.0.0.3", "10.2.2.3"}, Port: 8080}}},
+			getServicePortKey(tcp, "tcp-example"): {nodeA: lbEndpoints{V4IPs: []string{"10.0.0.2", "10.1.1.2"}, Port: 80}},
+			getServicePortKey(tcp, "other-port"):  {nodeA: lbEndpoints{V4IPs: []string{"10.0.0.3", "10.2.2.3"}, Port: 8080}}},
 	},
 	{
 		name: "multiples slices with different ports, OVN zone with two nodes, ETP=local",
@@ -3669,7 +3745,7 @@ func Test_getEndpointsForService(t *testing.T) {
 						},
 					},
 					AddressType: discovery.AddressTypeIPv4,
-					Endpoints:   kube_test.MakeReadyEndpointList(nodeA, "10.0.0.2", "10.1.1.2"),
+					Endpoints:   kubetest.MakeReadyEndpointList(nodeA, "10.0.0.2", "10.1.1.2"),
 				},
 				{
 					ObjectMeta: metav1.ObjectMeta{
@@ -3685,18 +3761,18 @@ func Test_getEndpointsForService(t *testing.T) {
 						},
 					},
 					AddressType: discovery.AddressTypeIPv4,
-					Endpoints:   kube_test.MakeReadyEndpointList(nodeB, "10.0.0.3", "10.2.2.3"),
+					Endpoints:   kubetest.MakeReadyEndpointList(nodeB, "10.0.0.3", "10.2.2.3"),
 				},
 			},
-			svc:   getSampleServiceWithTwoPortsAndETPLocal("tcp-example", "other-port", 80, 8080, tcpv1, tcpv1),
+			svc:   getSampleServiceWithTwoPortsAndETPLocal("tcp-example", "other-port", 80, 8080, tcp, tcp),
 			nodes: sets.New(nodeA, nodeB), // zone with two nodes
 		},
 		wantClusterEndpoints: map[string]lbEndpoints{
-			getServicePortKey(tcpv1, "tcp-example"): {V4IPs: []string{"10.0.0.2", "10.1.1.2"}, Port: 80},
-			getServicePortKey(tcpv1, "other-port"):  {V4IPs: []string{"10.0.0.3", "10.2.2.3"}, Port: 8080}},
+			getServicePortKey(tcp, "tcp-example"): {V4IPs: []string{"10.0.0.2", "10.1.1.2"}, Port: 80},
+			getServicePortKey(tcp, "other-port"):  {V4IPs: []string{"10.0.0.3", "10.2.2.3"}, Port: 8080}},
 		wantNodeEndpoints: map[string]map[string]lbEndpoints{
-			getServicePortKey(tcpv1, "tcp-example"): {nodeA: lbEndpoints{V4IPs: []string{"10.0.0.2", "10.1.1.2"}, Port: 80}},
-			getServicePortKey(tcpv1, "other-port"):  {nodeB: lbEndpoints{V4IPs: []string{"10.0.0.3", "10.2.2.3"}, Port: 8080}}},
+			getServicePortKey(tcp, "tcp-example"): {nodeA: lbEndpoints{V4IPs: []string{"10.0.0.2", "10.1.1.2"}, Port: 80}},
+			getServicePortKey(tcp, "other-port"):  {nodeB: lbEndpoints{V4IPs: []string{"10.0.0.3", "10.2.2.3"}, Port: 8080}}},
 	},
 	{
 		name: "slice with a mix of ready and terminating (serving and non-serving) endpoints",
@@ -3717,20 +3793,20 @@ func Test_getEndpointsForService(t *testing.T) {
 					},
 					AddressType: discovery.AddressTypeIPv6,
 					Endpoints: []discovery.Endpoint{
-						kube_test.MakeReadyEndpoint(nodeA, "2001:db2::2"),
-						kube_test.MakeReadyEndpoint(nodeA, "2001:db2::3"),
-						kube_test.MakeTerminatingServingEndpoint(nodeA, "2001:db2::4"),
-						kube_test.MakeTerminatingServingEndpoint(nodeA, "2001:db2::5"),
-						kube_test.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::6"), // ignored
+						kubetest.MakeReadyEndpoint(nodeA, "2001:db2::2"),
+						kubetest.MakeReadyEndpoint(nodeA, "2001:db2::3"),
+						kubetest.MakeTerminatingServingEndpoint(nodeA, "2001:db2::4"),
+						kubetest.MakeTerminatingServingEndpoint(nodeA, "2001:db2::5"),
+						kubetest.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::6"), // ignored
 					},
 				},
 			},
-			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcpv1),
+			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcp),
 			nodes: sets.New(nodeA), // one-node zone
 		},
 		wantClusterEndpoints: map[string]lbEndpoints{
-			getServicePortKey(tcpv1, "tcp-example"): {V6IPs: []string{"2001:db2::2", "2001:db2::3"}, Port: 80}},
+			getServicePortKey(tcp, "tcp-example"): {V6IPs: []string{"2001:db2::2", "2001:db2::3"}, Port: 80}},
 		wantNodeEndpoints: map[string]map[string]lbEndpoints{}, // local endpoints not filled in, since service is not ETP or ITP local
 	},
 	{
@@ -3752,18 +3828,18 @@ func Test_getEndpointsForService(t *testing.T) {
 					},
 					AddressType: discovery.AddressTypeIPv6,
 					Endpoints: []discovery.Endpoint{
-						kube_test.MakeTerminatingServingEndpoint(nodeA, "2001:db2::4"),
-						kube_test.MakeTerminatingServingEndpoint(nodeA, "2001:db2::5"),
-						kube_test.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::6"),
-						kube_test.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::7"),
+						kubetest.MakeTerminatingServingEndpoint(nodeA, "2001:db2::4"),
+						kubetest.MakeTerminatingServingEndpoint(nodeA, "2001:db2::5"),
+						kubetest.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::6"),
+						kubetest.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::7"),
 					},
 				},
 			},
-			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcpv1),
+			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcp),
 			nodes: sets.New(nodeA), // one-node zone
 		},
 		wantClusterEndpoints: map[string]lbEndpoints{
-			getServicePortKey(tcpv1, "tcp-example"): {V6IPs: []string{"2001:db2::4", "2001:db2::5"}, Port: 80}},
+			getServicePortKey(tcp, "tcp-example"): {V6IPs: []string{"2001:db2::4", "2001:db2::5"}, Port: 80}},
 		wantNodeEndpoints: map[string]map[string]lbEndpoints{}, // local endpoints not filled in, since service is not ETP or ITP local
 	},
 	{
@@ -3785,12 +3861,12 @@ func Test_getEndpointsForService(t *testing.T) {
 					},
 					AddressType: discovery.AddressTypeIPv6,
 					Endpoints: []discovery.Endpoint{
-						kube_test.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::6"),
-						kube_test.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::7"),
+						kubetest.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::6"),
+						kubetest.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::7"),
 					},
 				},
 			},
-			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcpv1),
+			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcp),
 			nodes: sets.New(nodeA), // one-node zone
 		},
 		wantClusterEndpoints: map[string]lbEndpoints{}, // no cluster-wide endpoints
@@ -3816,8 +3892,8 @@ func Test_getEndpointsForService(t *testing.T) {
 					},
 					AddressType: discovery.AddressTypeIPv6,
 					Endpoints: []discovery.Endpoint{
-						kube_test.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::2"), // ignored
-						kube_test.MakeTerminatingServingEndpoint(nodeA, "2001:db2::3"),
+						kubetest.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::2"), // ignored
+						kubetest.MakeTerminatingServingEndpoint(nodeA, "2001:db2::3"),
 					},
 				},
 				{
@@ -3835,16 +3911,16 @@ func Test_getEndpointsForService(t *testing.T) {
 					},
 					AddressType: discovery.AddressTypeIPv6,
 					Endpoints: []discovery.Endpoint{
-						kube_test.MakeTerminatingServingEndpoint(nodeA, "2001:db2::4"),
-						kube_test.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::5"), // ignored
+						kubetest.MakeTerminatingServingEndpoint(nodeA, "2001:db2::4"),
+						kubetest.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::5"), // ignored
 					},
 				},
 			},
-			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcpv1),
+			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcp),
 			nodes: sets.New(nodeA), // one-node zone
 		},
 		wantClusterEndpoints: map[string]lbEndpoints{
-			getServicePortKey(tcpv1, "tcp-example"): {V6IPs: []string{"2001:db2::3", "2001:db2::4"}, Port: 80}},
+			getServicePortKey(tcp, "tcp-example"): {V6IPs: []string{"2001:db2::3", "2001:db2::4"}, Port: 80}},
 		wantNodeEndpoints: map[string]map[string]lbEndpoints{}, // local endpoints not filled in, since service is not ETP or ITP local
 	},
 	{
@@ -3866,7 +3942,7 @@ func Test_getEndpointsForService(t *testing.T) {
 					},
 					AddressType: discovery.AddressTypeIPv6,
 					Endpoints: []discovery.Endpoint{
-						kube_test.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::2"),
+						kubetest.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::2"),
 					},
 				},
 				{
@@ -3884,11 +3960,11 @@ func Test_getEndpointsForService(t *testing.T) {
 					},
 					AddressType: discovery.AddressTypeIPv6,
 					Endpoints: []discovery.Endpoint{
-						kube_test.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::5"),
+						kubetest.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::5"),
 					},
 				},
 			},
-			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcpv1),
+			svc:   getSampleServiceWithOnePort("tcp-example", 80, tcp),
 			nodes: sets.New(nodeA), // one-node zone
 		},
 		wantClusterEndpoints: map[string]lbEndpoints{}, // no cluster-wide endpoints
[]discovery.Endpoint{ - kube_test.MakeReadyEndpoint(nodeA, "10.0.0.2"), // included - kube_test.MakeTerminatingServingEndpoint(nodeA, "10.0.0.3"), // included - kube_test.MakeTerminatingNonServingEndpoint(nodeA, "10.0.0.4"), // included + kubetest.MakeReadyEndpoint(nodeA, "10.0.0.2"), // included + kubetest.MakeTerminatingServingEndpoint(nodeA, "10.0.0.3"), // included + kubetest.MakeTerminatingNonServingEndpoint(nodeA, "10.0.0.4"), // included }, }, { @@ -4084,17 +4160,17 @@ func Test_getEndpointsForService(t *testing.T) { }, AddressType: discovery.AddressTypeIPv6, Endpoints: []discovery.Endpoint{ - kube_test.MakeReadyEndpoint(nodeA, "2001:db2::2"), // included - kube_test.MakeTerminatingServingEndpoint(nodeA, "2001:db2::3"), // included - kube_test.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::4"), // included + kubetest.MakeReadyEndpoint(nodeA, "2001:db2::2"), // included + kubetest.MakeTerminatingServingEndpoint(nodeA, "2001:db2::3"), // included + kubetest.MakeTerminatingNonServingEndpoint(nodeA, "2001:db2::4"), // included }, }, }, - svc: getSampleServiceWithOnePortAndPublishNotReadyAddresses("tcp-example", 80, tcpv1), // <-- publishNotReadyAddresses=true - nodes: sets.New(nodeA), // one-node zone + svc: getSampleServiceWithOnePortAndPublishNotReadyAddresses("tcp-example", 80, tcp), // <-- publishNotReadyAddresses=true + nodes: sets.New(nodeA), // one-node zone }, wantClusterEndpoints: map[string]lbEndpoints{ - getServicePortKey(tcpv1, "tcp-example"): { + getServicePortKey(tcp, "tcp-example"): { V4IPs: []string{"10.0.0.2", "10.0.0.3", "10.0.0.4"}, V6IPs: []string{"2001:db2::2", "2001:db2::3", "2001:db2::4"}, Port: 80}}, @@ -4103,7 +4179,8 @@ func Test_getEndpointsForService(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - portToClusterEndpoints, portToNodeToEndpoints := getEndpointsForService(tt.args.slices, tt.args.svc, tt.args.nodes) + portToClusterEndpoints, portToNodeToEndpoints := getEndpointsForService( + tt.args.slices, tt.args.svc, tt.args.nodes, types.DefaultNetworkName) assert.Equal(t, tt.wantClusterEndpoints, portToClusterEndpoints) assert.Equal(t, tt.wantNodeEndpoints, portToNodeToEndpoints) @@ -4167,7 +4244,7 @@ func Test_makeNodeSwitchTargetIPs(t *testing.T) { }, { name: "LB service with ETP=local, endpoint count changes", - service: getSampleServiceWithOnePortAndETPLocal("tcp-example", 80, tcpv1), + service: getSampleServiceWithOnePortAndETPLocal("tcp-example", 80, tcp), config: &lbConfig{ vips: []string{"1.2.3.4", "fe10::1"}, protocol: v1.ProtocolTCP, @@ -4195,7 +4272,7 @@ func Test_makeNodeSwitchTargetIPs(t *testing.T) { }, { name: "LB service with ETP=local, endpoint count is the same", - service: getSampleServiceWithOnePortAndETPLocal("tcp-example", 80, tcpv1), + service: getSampleServiceWithOnePortAndETPLocal("tcp-example", 80, tcp), config: &lbConfig{ vips: []string{"1.2.3.4", "fe10::1"}, protocol: v1.ProtocolTCP, @@ -4222,7 +4299,7 @@ func Test_makeNodeSwitchTargetIPs(t *testing.T) { }, { name: "LB service with ETP=local, no local endpoints left", - service: getSampleServiceWithOnePortAndETPLocal("tcp-example", 80, tcpv1), + service: getSampleServiceWithOnePortAndETPLocal("tcp-example", 80, tcp), config: &lbConfig{ vips: []string{"1.2.3.4", "fe10::1"}, protocol: v1.ProtocolTCP, @@ -4244,7 +4321,7 @@ func Test_makeNodeSwitchTargetIPs(t *testing.T) { // OCP HACK BEGIN { name: "LB service with ETP=local, no local endpoints left, localWithFallback annotation", - service: 
addFallbackAnnotationToService(getSampleServiceWithOnePortAndETPLocal("tcp-example", 80, tcpv1)), + service: addFallbackAnnotationToService(getSampleServiceWithOnePortAndETPLocal("tcp-example", 80, tcp)), config: &lbConfig{ vips: []string{"1.2.3.4", "fe10::1"}, protocol: v1.ProtocolTCP, diff --git a/go-controller/pkg/ovn/controller/services/loadbalancer.go b/go-controller/pkg/ovn/controller/services/loadbalancer.go index 8972b7072a..f3348b7236 100644 --- a/go-controller/pkg/ovn/controller/services/loadbalancer.go +++ b/go-controller/pkg/ovn/controller/services/loadbalancer.go @@ -131,8 +131,8 @@ func toNBTemplateList(tlbs []*templateLoadBalancer) []TemplateMap { // // It is assumed that names are meaningful and somewhat stable, to minimize churn. This // function doesn't work with Load_Balancers without a name. -func EnsureLBs(nbClient libovsdbclient.Client, service *corev1.Service, existingCacheLBs []LB, LBs []LB) error { - externalIDs := util.ExternalIDsForObject(service) +func EnsureLBs(nbClient libovsdbclient.Client, service *corev1.Service, existingCacheLBs []LB, LBs []LB, netInfo util.NetInfo) error { + externalIDs := getExternalIDsForLoadBalancer(service, netInfo) existingByName := make(map[string]*LB, len(existingCacheLBs)) toDelete := make(map[string]*LB, len(existingCacheLBs)) @@ -425,25 +425,52 @@ func DeleteLBs(nbClient libovsdbclient.Client, uuids []string) error { return nil } -// getLBs returns a slice of load balancers found in OVN. -func getLBs(nbClient libovsdbclient.Client, allTemplates TemplateMap) ([]*LB, error) { - _, out, err := _getLBsCommon(nbClient, allTemplates, false) +// getAllLBs returns a slice of load balancers found in OVN. +func getAllLBs(nbClient libovsdbclient.Client, allTemplates TemplateMap) ([]*LB, error) { + _, out, err := _getLBsCommon(nbClient, allTemplates, false, true, nil) return out, err } -// getServiceLBs returns a set of services as well as a slice of load balancers found in OVN. -func getServiceLBs(nbClient libovsdbclient.Client, allTemplates TemplateMap) (sets.Set[string], []*LB, error) { - return _getLBsCommon(nbClient, allTemplates, true) +// getServiceLBsForNetwork returns the services and OVN load balancers for the network specified in netInfo. +func getServiceLBsForNetwork(nbClient libovsdbclient.Client, allTemplates TemplateMap, netInfo util.NetInfo) (sets.Set[string], []*LB, error) { + return _getLBsCommon(nbClient, allTemplates, true, false, netInfo) } -func _getLBsCommon(nbClient libovsdbclient.Client, allTemplates TemplateMap, withServiceOwner bool) (sets.Set[string], []*LB, error) { - lbs, err := libovsdbops.ListLoadBalancers(nbClient) +func _getLBsCommon(nbClient libovsdbclient.Client, allTemplates TemplateMap, withServiceOwner bool, includeAllNetworks bool, netInfo util.NetInfo) (sets.Set[string], []*LB, error) { + + // Lookup network name and network role in the OVN external IDs to check whether + // the OVN element with the input externalIDs belongs to this network. 
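+	// For example, an element owned by a user-defined primary network is
+	// expected to carry types.NetworkExternalID set to the network name and
+	// types.NetworkRoleExternalID set to types.NetworkRolePrimary, while
+	// default-network elements carry no types.NetworkExternalID at all.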
+	belongsToThisNetwork := func(externalIDs map[string]string) bool {
+		if !util.IsNetworkSegmentationSupportEnabled() {
+			return true
+		}
+
+		network, ok := externalIDs[types.NetworkExternalID]
+
+		if netInfo == nil {
+			return true
+		}
+
+		if netInfo.IsDefault() {
+			return !ok
+		}
+
+		// filter out anything belonging to default and (if any) secondary networks
+		role, ok := externalIDs[types.NetworkRoleExternalID]
+		return ok && role == types.NetworkRolePrimary && network == netInfo.GetNetworkName()
+	}
+
+	p := func(item *nbdb.LoadBalancer) bool {
+		return includeAllNetworks || belongsToThisNetwork(item.ExternalIDs)
+	}
+
+	lbs, err := libovsdbops.FindLoadBalancersWithPredicate(nbClient, p)
 	if err != nil {
 		return nil, nil, fmt.Errorf("could not list load_balancer: %w", err)
 	}
-	services := sets.New[string]()
-	outMap := make(map[string]*LB, len(lbs))
+	services := sets.New[string]() // all services found in load balancers
+	outMap := make(map[string]*LB, len(lbs)) // UUID -> *LB
 	for _, lb := range lbs {
 		// Skip load balancers unrelated to service, or w/out an owner (aka namespace+name)
@@ -481,8 +508,9 @@ func _getLBsCommon(nbClient libovsdbclient.Client, allTemplates TemplateMap, wit
 	// Switches
 	ps := func(item *nbdb.LogicalSwitch) bool {
-		return len(item.LoadBalancer) > 0
+		return len(item.LoadBalancer) > 0 && (includeAllNetworks || belongsToThisNetwork(item.ExternalIDs))
 	}
+
 	switches, err := libovsdbops.FindLogicalSwitchesWithPredicate(nbClient, ps)
 	if err != nil {
 		return nil, nil, fmt.Errorf("could not list logical switches: %w", err)
@@ -497,7 +525,7 @@ func _getLBsCommon(nbClient libovsdbclient.Client, allTemplates TemplateMap, wit
 	// Routers
 	pr := func(item *nbdb.LogicalRouter) bool {
-		return len(item.LoadBalancer) > 0
+		return len(item.LoadBalancer) > 0 && (includeAllNetworks || belongsToThisNetwork(item.ExternalIDs))
 	}
 	routers, err := libovsdbops.FindLogicalRoutersWithPredicate(nbClient, pr)
 	if err != nil {
@@ -511,10 +539,39 @@ func _getLBsCommon(nbClient libovsdbclient.Client, allTemplates TemplateMap, wit
 		}
 	}
-	// Groups
+	// LB Groups
 	pg := func(item *nbdb.LoadBalancerGroup) bool {
-		return len(item.LoadBalancer) > 0
+		if len(item.LoadBalancer) == 0 {
+			return false
+		}
+
+		if !util.IsNetworkSegmentationSupportEnabled() || includeAllNetworks {
+			return true
+		}
+
+		if netInfo == nil {
+			return true
+		}
+
+		// LB groups have no external ID in OVN, so parse their name instead
+		if netInfo.IsDefault() {
+			knownDefaultLBGroups := []string{
+				types.ClusterLBGroupName,
+				types.ClusterSwitchLBGroupName,
+				types.ClusterRouterLBGroupName,
+			}
+			for _, knownGroup := range knownDefaultLBGroups {
+				if item.Name == knownGroup {
+					return true
+				}
+			}
+			return false
+		}
+		// UDN
+		return strings.HasPrefix(item.Name, netInfo.GetNetworkName())
 	}
+
 	groups, err := libovsdbops.FindLoadBalancerGroupsWithPredicate(nbClient, pg)
 	if err != nil {
 		return nil, nil, fmt.Errorf("could not list load balancer groups: %w", err)
diff --git a/go-controller/pkg/ovn/controller/services/loadbalancer_test.go b/go-controller/pkg/ovn/controller/services/loadbalancer_test.go
index d48e44589e..8ade7b5079 100644
--- a/go-controller/pkg/ovn/controller/services/loadbalancer_test.go
+++ b/go-controller/pkg/ovn/controller/services/loadbalancer_test.go
@@ -6,24 +6,23 @@ import (
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
 	libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb"
-	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
+
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilpointer "k8s.io/utils/pointer" ) +var ( + name string = "foo" + namespace string = "testns" +) + func TestEnsureStaleLBs(t *testing.T) { nbClient, cleanup, err := libovsdbtest.NewNBTestHarness(libovsdbtest.TestSetup{}, nil) if err != nil { t.Fatalf("Error creating NB: %v", err) } t.Cleanup(cleanup.Cleanup) - name := "foo" - namespace := "testns" - defaultExternalIDs := map[string]string{ - types.LoadBalancerKindExternalID: "Service", - types.LoadBalancerOwnerExternalID: fmt.Sprintf("%s/%s", namespace, name), - } defaultService := &v1.Service{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, @@ -35,9 +34,9 @@ func TestEnsureStaleLBs(t *testing.T) { staleLBs := []LB{ { Name: "Service_testns/foo_TCP_node_router_node-a", - ExternalIDs: defaultExternalIDs, - Routers: []string{"gr-node-a", "non-exisitng-router"}, - Switches: []string{"non-exisitng-switch"}, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), + Routers: []string{"gr-node-a", "non-existing-router"}, + Switches: []string{"non-existing-switch"}, Groups: []string{"non-existing-group"}, Protocol: "TCP", Rules: []LBRule{ @@ -50,9 +49,9 @@ func TestEnsureStaleLBs(t *testing.T) { }, { Name: "Service_testns/foo_TCP_node_is_gone", - ExternalIDs: defaultExternalIDs, - Routers: []string{"gr-node-is-gone", "non-exisitng-router"}, - Switches: []string{"non-exisitng-switch"}, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), + Routers: []string{"gr-node-is-gone", "non-existing-router"}, + Switches: []string{"non-existing-switch"}, Groups: []string{"non-existing-group"}, Protocol: "TCP", Rules: []LBRule{ @@ -70,7 +69,7 @@ func TestEnsureStaleLBs(t *testing.T) { LBs := []LB{ { Name: "Service_testns/foo_TCP_node_router_node-a", - ExternalIDs: defaultExternalIDs, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), Routers: []string{"gr-node-a"}, Protocol: "TCP", Rules: []LBRule{ @@ -82,7 +81,7 @@ func TestEnsureStaleLBs(t *testing.T) { UUID: "", // intentionally left empty to make sure EnsureLBs sets it properly }, } - err = EnsureLBs(nbClient, defaultService, staleLBs, LBs) + err = EnsureLBs(nbClient, defaultService, staleLBs, LBs, &util.DefaultNetInfo{}) if err != nil { t.Fatalf("Error EnsureLBs: %v", err) } @@ -101,7 +100,7 @@ func TestEnsureLBs(t *testing.T) { { desc: "create service with permanent session affinity", service: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "testns"}, + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, Spec: v1.ServiceSpec{ Type: v1.ServiceTypeClusterIP, SessionAffinity: v1.ServiceAffinityClientIP, @@ -114,13 +113,10 @@ func TestEnsureLBs(t *testing.T) { }, LBs: []LB{ { - Name: "Service_foo/testns_TCP_cluster", - ExternalIDs: map[string]string{ - types.LoadBalancerKindExternalID: "Service", - types.LoadBalancerOwnerExternalID: fmt.Sprintf("%s/%s", "foo", "testns"), - }, - Routers: []string{"gr-node-a"}, - Protocol: "TCP", + Name: "Service_foo/testns_TCP_cluster", + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), + Routers: []string{"gr-node-a"}, + Protocol: "TCP", Rules: []LBRule{ { Source: Addr{IP: "192.168.1.1", Port: 80}, @@ -135,21 +131,21 @@ func TestEnsureLBs(t *testing.T) { }, }, finalLB: &nbdb.LoadBalancer{ - UUID: loadBalancerClusterWideTCPServiceName("foo", "testns"), - Name: 
loadBalancerClusterWideTCPServiceName("foo", "testns"), + UUID: clusterWideTCPServiceLoadBalancerName(name, namespace), + Name: clusterWideTCPServiceLoadBalancerName(name, namespace), Options: servicesOptions(), Protocol: &nbdb.LoadBalancerProtocolTCP, Vips: map[string]string{ "192.168.1.1:80": "10.0.244.3:8080", }, - ExternalIDs: serviceExternalIDs(namespacedServiceName("foo", "testns")), + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), SelectionFields: []string{"ip_src", "ip_dst"}, // permanent session affinity, no learn flows }, }, { desc: "create service with default session affinity timeout", service: &v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "testns"}, + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, Spec: v1.ServiceSpec{ Type: v1.ServiceTypeClusterIP, SessionAffinity: v1.ServiceAffinityClientIP, @@ -162,13 +158,10 @@ func TestEnsureLBs(t *testing.T) { }, LBs: []LB{ { - Name: "Service_foo/testns_TCP_cluster", - ExternalIDs: map[string]string{ - types.LoadBalancerKindExternalID: "Service", - types.LoadBalancerOwnerExternalID: fmt.Sprintf("%s/%s", "foo", "testns"), - }, - Routers: []string{"gr-node-a"}, - Protocol: "TCP", + Name: "Service_foo/testns_TCP_cluster", + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), + Routers: []string{"gr-node-a"}, + Protocol: "TCP", Rules: []LBRule{ { Source: Addr{IP: "192.168.1.1", Port: 80}, @@ -183,14 +176,14 @@ func TestEnsureLBs(t *testing.T) { }, }, finalLB: &nbdb.LoadBalancer{ - UUID: loadBalancerClusterWideTCPServiceName("foo", "testns"), - Name: loadBalancerClusterWideTCPServiceName("foo", "testns"), + UUID: clusterWideTCPServiceLoadBalancerName(name, namespace), + Name: clusterWideTCPServiceLoadBalancerName(name, namespace), Options: servicesOptionsWithAffinityTimeout(), // timeout set in the options Protocol: &nbdb.LoadBalancerProtocolTCP, Vips: map[string]string{ "192.168.1.1:80": "10.0.244.3:8080", }, - ExternalIDs: serviceExternalIDs(namespacedServiceName("foo", "testns")), + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(namespace, name)), }, }, } @@ -208,7 +201,7 @@ func TestEnsureLBs(t *testing.T) { } t.Cleanup(cleanup.Cleanup) - err = EnsureLBs(nbClient, tt.service, []LB{}, tt.LBs) + err = EnsureLBs(nbClient, tt.service, []LB{}, tt.LBs, &util.DefaultNetInfo{}) if err != nil { t.Fatalf("Error EnsureLBs: %v", err) } @@ -216,7 +209,7 @@ func TestEnsureLBs(t *testing.T) { tt.finalLB, &nbdb.LogicalRouter{ Name: "gr-node-a", - LoadBalancer: []string{loadBalancerClusterWideTCPServiceName("foo", "testns")}, + LoadBalancer: []string{clusterWideTCPServiceLoadBalancerName(name, namespace)}, }, }) success, err := matcher.Match(nbClient) diff --git a/go-controller/pkg/ovn/controller/services/node_tracker.go b/go-controller/pkg/ovn/controller/services/node_tracker.go index 6a16fc23db..6a4df574c4 100644 --- a/go-controller/pkg/ovn/controller/services/node_tracker.go +++ b/go-controller/pkg/ovn/controller/services/node_tracker.go @@ -32,6 +32,8 @@ type nodeTracker struct { // zone in which this nodeTracker is tracking zone string + + netInfo util.NetInfo } type nodeInfo struct { @@ -76,11 +78,13 @@ func (ni *nodeInfo) l3gatewayAddressesStr() []string { return out } -func newNodeTracker(zone string, resyncFn func(nodes []nodeInfo)) *nodeTracker { +func newNodeTracker(zone string, resyncFn func(nodes []nodeInfo), netInfo util.NetInfo) *nodeTracker { + return &nodeTracker{ nodes: map[string]nodeInfo{}, zone: zone, resyncFn: resyncFn, + 
netInfo: netInfo, } } @@ -205,7 +209,15 @@ func (nt *nodeTracker) removeNode(nodeName string) { // The gateway router will exist sometime after the L3Gateway annotation is set. func (nt *nodeTracker) updateNode(node *v1.Node) { klog.V(2).Infof("Processing possible switch / router updates for node %s", node.Name) - hsn, err := util.ParseNodeHostSubnetAnnotation(node, types.DefaultNetworkName) + var hsn []*net.IPNet + var err error + if nt.netInfo.TopologyType() == types.Layer2Topology { + for _, subnet := range nt.netInfo.Subnets() { + hsn = append(hsn, subnet.CIDR) + } + } else { + hsn, err = util.ParseNodeHostSubnetAnnotation(node, nt.netInfo.GetNetworkName()) + } if err != nil || hsn == nil || util.NoHostSubnet(node) { // usually normal; means the node's gateway hasn't been initialized yet klog.Infof("Node %s has invalid / no HostSubnet annotations (probably waiting on initialization), or it's a hybrid overlay node: %v", node.Name, err) @@ -213,7 +225,7 @@ func (nt *nodeTracker) updateNode(node *v1.Node) { return } - switchName := node.Name + switchName := nt.netInfo.GetNetworkScopedSwitchName(node.Name) grName := "" l3gatewayAddresses := []net.IP{} chassisID := "" @@ -225,7 +237,8 @@ func (nt *nodeTracker) updateNode(node *v1.Node) { if err != nil || gwConf == nil { klog.Infof("Node %s has invalid / no gateway config: %v", node.Name, err) } else if gwConf.Mode != globalconfig.GatewayModeDisabled { - grName = util.GetGatewayRouterFromNode(node.Name) + grName = nt.netInfo.GetNetworkScopedGWRouterName(node.Name) + // L3 GW IP addresses are not network-specific, we can take them from the default L3 GW annotation for _, ip := range gwConf.IPAddresses { l3gatewayAddresses = append(l3gatewayAddresses, ip.IP) } diff --git a/go-controller/pkg/ovn/controller/services/repair.go b/go-controller/pkg/ovn/controller/services/repair.go index 38c91bb610..84b6bb672a 100644 --- a/go-controller/pkg/ovn/controller/services/repair.go +++ b/go-controller/pkg/ovn/controller/services/repair.go @@ -5,9 +5,12 @@ import ( "time" libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" @@ -50,7 +53,7 @@ func newRepair(serviceLister corelisters.ServiceLister, nbClient libovsdbclient. } // runBeforeSync performs some cleanup of stale LBs and other miscellaneous setup. -func (r *repair) runBeforeSync(useTemplates bool) { +func (r *repair) runBeforeSync(useTemplates bool, netInfo util.NetInfo, nodes map[string]nodeInfo) { // no need to lock, single-threaded. 
startTime := time.Now() @@ -80,7 +83,7 @@ func (r *repair) runBeforeSync(useTemplates bool) { } // Find all load-balancers associated with Services - existingLBs, err := getLBs(r.nbClient, allTemplates) + existingLBs, err := getAllLBs(r.nbClient, allTemplates) if err != nil { klog.Errorf("Unable to get service lbs for repair: %v", err) } @@ -150,6 +153,42 @@ func (r *repair) runBeforeSync(useTemplates bool) { klog.Errorf("Failed to purge existing reject rules: %v", err) } } + + // remove static routes for UDN enabled services that are no longer valid + udnDelPredicate := func(route *nbdb.LogicalRouterStaticRoute) bool { + if route.ExternalIDs[types.NetworkExternalID] == netInfo.GetNetworkName() && + route.ExternalIDs[types.TopologyExternalID] == netInfo.TopologyType() { + if serviceKey, exists := route.ExternalIDs[types.UDNEnabledServiceExternalID]; exists { + if !r.unsyncedServices.Has(serviceKey) { + // the service doesn't exist + return true + } + if !util.IsUDNEnabledService(serviceKey) { + // the service is not a part of UDNAllowedDefaultServices anymore + return true + } + } + } + return false + } + + if netInfo.IsPrimaryNetwork() { + var ops []libovsdb.Operation + if netInfo.TopologyType() == types.Layer2Topology { + for _, node := range nodes { + if ops, err = libovsdbops.DeleteLogicalRouterStaticRoutesWithPredicateOps(r.nbClient, ops, netInfo.GetNetworkScopedGWRouterName(node.name), udnDelPredicate); err != nil { + klog.Errorf("Failed to create a delete logical router static route op: %v", err) + } + } + } else { + if ops, err = libovsdbops.DeleteLogicalRouterStaticRoutesWithPredicateOps(r.nbClient, ops, netInfo.GetNetworkScopedClusterRouterName(), udnDelPredicate); err != nil { + klog.Errorf("Failed to create a delete logical router static route op: %v", err) + } + } + if _, err = libovsdbops.TransactAndCheck(r.nbClient, ops); err != nil { + klog.Errorf("Failed to delete logical router static routes: %v", err) + } + } } // serviceSynced is called by a ServiceController worker when it has successfully diff --git a/go-controller/pkg/ovn/controller/services/services_controller.go b/go-controller/pkg/ovn/controller/services/services_controller.go index a4ba19d5bd..6ce13ffcc8 100644 --- a/go-controller/pkg/ovn/controller/services/services_controller.go +++ b/go-controller/pkg/ovn/controller/services/services_controller.go @@ -3,31 +3,39 @@ package services import ( "errors" "fmt" + "net" "sync" "time" libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + "golang.org/x/time/rate" + globalconfig "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + networkAttachDefController "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" - "golang.org/x/time/rate" v1 "k8s.io/api/core/v1" discovery "k8s.io/api/discovery/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + ktypes "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" 
"k8s.io/apimachinery/pkg/util/wait" + discoverylisters "k8s.io/client-go/listers/discovery/v1" + utilnet "k8s.io/utils/net" coreinformers "k8s.io/client-go/informers/core/v1" discoveryinformers "k8s.io/client-go/informers/discovery/v1" clientset "k8s.io/client-go/kubernetes" corelisters "k8s.io/client-go/listers/core/v1" - discoverylisters "k8s.io/client-go/listers/discovery/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" @@ -47,7 +55,7 @@ const ( nodeControllerName = "node-tracker-controller" ) -var NoServiceLabelError = fmt.Errorf("endpointSlice missing %s label", discovery.LabelServiceName) +var NoServiceLabelError = fmt.Errorf("endpointSlice missing the service name label") // NewController returns a new *Controller. func NewController(client clientset.Interface, @@ -55,13 +63,18 @@ func NewController(client clientset.Interface, serviceInformer coreinformers.ServiceInformer, endpointSliceInformer discoveryinformers.EndpointSliceInformer, nodeInformer coreinformers.NodeInformer, + nadController networkAttachDefController.NADController, recorder record.EventRecorder, + netInfo util.NetInfo, ) (*Controller, error) { - klog.V(4).Info("Creating event broadcaster") + klog.V(4).Infof("Creating services controller for network=%s", netInfo.GetNetworkName()) c := &Controller{ - client: client, - nbClient: nbClient, - queue: workqueue.NewNamedRateLimitingQueue(newRatelimiter(100), controllerName), + client: client, + nbClient: nbClient, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + newRatelimiter(100), + workqueue.TypedRateLimitingQueueConfig[string]{Name: controllerName}, + ), workerLoopPeriod: time.Second, alreadyApplied: map[string][]LB{}, nodeIPv4Templates: NewNodeIPsTemplates(v1.IPv4Protocol), @@ -70,10 +83,13 @@ func NewController(client clientset.Interface, serviceLister: serviceInformer.Lister(), endpointSliceInformer: endpointSliceInformer, endpointSliceLister: endpointSliceInformer.Lister(), - eventRecorder: recorder, - repair: newRepair(serviceInformer.Lister(), nbClient), - nodeInformer: nodeInformer, - nodesSynced: nodeInformer.Informer().HasSynced, + nadController: nadController, + + eventRecorder: recorder, + repair: newRepair(serviceInformer.Lister(), nbClient), + nodeInformer: nodeInformer, + nodesSynced: nodeInformer.Informer().HasSynced, + netInfo: netInfo, } zone, err := libovsdbutil.GetNBZone(c.nbClient) if err != nil { @@ -82,11 +98,7 @@ func NewController(client clientset.Interface, // load balancers need to be applied to nodes, so // we need to watch Node objects for changes. // Need to re-sync all services when a node gains its switch or GWR - c.nodeTracker = newNodeTracker(zone, c.RequestFullSync) - if err != nil { - return nil, err - } - + c.nodeTracker = newNodeTracker(zone, c.RequestFullSync, netInfo) return c, nil } @@ -103,9 +115,9 @@ type Controller struct { serviceLister corelisters.ServiceLister endpointSliceInformer discoveryinformers.EndpointSliceInformer - // endpointSliceLister is able to list/get endpoint slices and is populated - // by the shared informer passed to NewController - endpointSliceLister discoverylisters.EndpointSliceLister + endpointSliceLister discoverylisters.EndpointSliceLister + + nadController networkAttachDefController.NADController nodesSynced cache.InformerSynced @@ -114,7 +126,7 @@ type Controller struct { // more often than services with few pods; it also would cause a // service that's inserted multiple times to be processed more than // necessary. 
- queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // workerLoopPeriod is the time between worker runs. The workers process the queue of service and pod changes. workerLoopPeriod time.Duration @@ -154,6 +166,8 @@ type Controller struct { // 'true' if Chassis_Template_Var is supported. useTemplates bool + + netInfo util.NetInfo } // Run will not return until stopCh is closed. workers determines how many @@ -164,16 +178,15 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}, runRepair, useLBGr c.useLBGroups = useLBGroups c.useTemplates = useTemplates - - klog.Infof("Starting controller %s", controllerName) - defer klog.Infof("Shutting down controller %s", controllerName) + klog.Infof("Starting controller %s for network=%s", controllerName, c.netInfo.GetNetworkName()) + defer klog.Infof("Shutting down controller %s for network=%s", controllerName, c.netInfo.GetNetworkName()) nodeHandler, err := c.nodeTracker.Start(c.nodeInformer) if err != nil { return err } // We need the node tracker to be synced first, as we rely on it to properly reprogram initial per node load balancers - klog.Info("Waiting for node tracker handler to sync") + klog.Infof("Waiting for node tracker handler to sync for network=%s", c.netInfo.GetNetworkName()) c.startupDoneLock.Lock() c.startupDone = false c.startupDoneLock.Unlock() @@ -181,7 +194,7 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}, runRepair, useLBGr return fmt.Errorf("error syncing node tracker handler") } - klog.Info("Setting up event handlers for services") + klog.Infof("Setting up event handlers for services for network=%s", c.netInfo.GetNetworkName()) svcHandler, err := c.serviceInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ AddFunc: c.onServiceAdd, UpdateFunc: c.onServiceUpdate, @@ -191,20 +204,22 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}, runRepair, useLBGr return err } - klog.Info("Setting up event handlers for endpoint slices") + klog.Infof("Setting up event handlers for endpoint slices for network=%s", c.netInfo.GetNetworkName()) endpointHandler, err := c.endpointSliceInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace( - // Filter out objects without the default serviceName label to exclude mirrored EndpointSlices - // This controller instance only handles services in the default cluster network - util.GetDefaultEndpointSlicesEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: c.onEndpointSliceAdd, - UpdateFunc: c.onEndpointSliceUpdate, - DeleteFunc: c.onEndpointSliceDelete, - }))) + // Filter out endpointslices that don't belong to this network (i.e. 
keep only kube-generated endpointslices if + // on default network, keep only mirrored endpointslices for this network if on UDN) + util.GetEndpointSlicesEventHandlerForNetwork( + cache.ResourceEventHandlerFuncs{ + AddFunc: c.onEndpointSliceAdd, + UpdateFunc: c.onEndpointSliceUpdate, + DeleteFunc: c.onEndpointSliceDelete, + }, + c.netInfo))) if err != nil { return err } - klog.Info("Waiting for service and endpoint handlers to sync") + klog.Infof("Waiting for service and endpoint handlers to sync for network=%s", c.netInfo.GetNetworkName()) if !util.WaitForHandlerSyncWithTimeout(controllerName, stopCh, types.HandlerSyncTimeout, svcHandler.HasSynced, endpointHandler.HasSynced) { return fmt.Errorf("error syncing service and endpoint handlers") } @@ -213,7 +228,7 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}, runRepair, useLBGr // Run the repair controller only once // it keeps in sync Kubernetes and OVN // and handles removal of stale data on upgrades - c.repair.runBeforeSync(c.useTemplates) + c.repair.runBeforeSync(c.useTemplates, c.netInfo, c.nodeTracker.nodes) } if err := c.initTopLevelCache(); err != nil { @@ -225,7 +240,7 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}, runRepair, useLBGr c.startupDoneLock.Unlock() // Start the workers after the repair loop to avoid races - klog.Info("Starting workers") + klog.Infof("Starting workers for network=%s", c.netInfo.GetNetworkName()) for i := 0; i < workers; i++ { go wait.Until(c.worker, c.workerLoopPeriod, stopCh) } @@ -250,14 +265,14 @@ func (c *Controller) processNextWorkItem() bool { } defer c.queue.Done(eKey) - err := c.syncService(eKey.(string)) + err := c.syncService(eKey) c.handleErr(err, eKey) return true } -func (c *Controller) handleErr(err error, key interface{}) { - ns, name, keyErr := cache.SplitMetaNamespaceKey(key.(string)) +func (c *Controller) handleErr(err error, key string) { + ns, name, keyErr := cache.SplitMetaNamespaceKey(key) if keyErr != nil { klog.ErrorS(err, "Failed to split meta namespace cache key", "key", key) } @@ -275,7 +290,7 @@ func (c *Controller) handleErr(err error, key interface{}) { return } - klog.Warningf("Dropping service %q out of the queue: %v", key, err) + klog.Warningf("Dropping service %q out of the queue for network=%s: %v", key, c.netInfo.GetNetworkName(), err) metrics.GetConfigDurationRecorder().End("service", ns, name) c.queue.Forget(key) utilruntime.HandleError(err) @@ -302,7 +317,7 @@ func (c *Controller) initTopLevelCache() error { } // Then list all load balancers and their respective services. 
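 	// Listing is scoped by the network-ownership predicate, so each
 	// controller's cache only ever contains its own network's entries.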
- services, lbs, err := getServiceLBs(c.nbClient, allTemplates) + services, lbs, err := getServiceLBsForNetwork(c.nbClient, allTemplates, c.netInfo) if err != nil { return fmt.Errorf("failed to load balancers: %w", err) } @@ -314,8 +329,8 @@ func (c *Controller) initTopLevelCache() error { c.alreadyApplied[service] = append(c.alreadyApplied[service], *lb) } - klog.Infof("Controller cache of %d load balancers initialized for %d services", - len(lbs), len(c.alreadyApplied)) + klog.Infof("Controller cache of %d load balancers initialized for %d services for network=%s", + len(lbs), len(c.alreadyApplied), c.netInfo.GetNetworkName()) return nil } @@ -332,11 +347,11 @@ func (c *Controller) syncService(key string) error { if err != nil { return err } - klog.V(5).Infof("Processing sync for service %s/%s", namespace, name) + klog.V(5).Infof("Processing sync for service %s/%s for network=%s", namespace, name, c.netInfo.GetNetworkName()) metrics.MetricSyncServiceCount.Inc() defer func() { - klog.V(5).Infof("Finished syncing service %s on namespace %s : %v", name, namespace, time.Since(startTime)) + klog.V(5).Infof("Finished syncing service %s on namespace %s for network=%s : %v", name, namespace, c.netInfo.GetNetworkName(), time.Since(startTime)) metrics.MetricSyncServiceLatency.Observe(time.Since(startTime).Seconds()) }() @@ -353,6 +368,21 @@ func (c *Controller) syncService(key string) error { return err } + // Handle default network services enabled for UDN in shared gateway mode + if c.netInfo.IsPrimaryNetwork() && + util.IsUDNEnabledService(key) { + + if service == nil { + return c.cleanupUDNEnabledServiceRoute(key) + } + + err = c.configureUDNEnabledServiceRoute(service) + if err != nil { + return fmt.Errorf("failed to configure the UDN enabled service route: %v", err) + } + return nil + } + // Delete the Service's LB(s) from OVN if: // - the Service was deleted from the cache (doesn't exist in Kubernetes anymore) // - the Service mutated to a new service Type that we don't handle (ExternalName, Headless) @@ -380,7 +410,7 @@ func (c *Controller) syncService(key string) error { // worker will be operating at a given service. That is why it is safe to have changes to this cache // from multiple workers, because the `key` is always uniquely hashed to the same worker thread. - if err := EnsureLBs(c.nbClient, service, existingLBs, nil); err != nil { + if err := EnsureLBs(c.nbClient, service, existingLBs, nil, c.netInfo); err != nil { return fmt.Errorf("failed to delete load balancers for service %s/%s: %w", namespace, name, err) } @@ -394,38 +424,29 @@ func (c *Controller) syncService(key string) error { return nil } - // // The Service exists in the cache: update it in OVN + klog.V(5).Infof("Service %s/%s retrieved from lister for network=%s: %v", service.Namespace, service.Name, c.netInfo.GetNetworkName(), service) - klog.V(5).Infof("Service %s retrieved from lister: %v", service.Name, service) - - // Get the endpoint slices associated to the Service - esLabelSelector := labels.Set(map[string]string{ - discovery.LabelServiceName: name, - }).AsSelectorPreValidated() - endpointSlices, err := c.endpointSliceLister.EndpointSlices(namespace).List(esLabelSelector) + endpointSlices, err := util.GetServiceEndpointSlices(namespace, service.Name, c.netInfo.GetNetworkName(), c.endpointSliceLister) if err != nil { - // Since we're getting stuff from a local cache, it is basically impossible to get this error. 
- c.eventRecorder.Eventf(service, v1.EventTypeWarning, "FailedToListEndpointSlices", - "Error listing Endpoint Slices for Service %s/%s: %v", namespace, name, err) - return err + return fmt.Errorf("service %s/%s for network=%s, %w", service.Namespace, service.Name, c.netInfo.GetNetworkName(), err) } // Build the abstract LB configs for this service - perNodeConfigs, templateConfigs, clusterConfigs := buildServiceLBConfigs(service, endpointSlices, c.nodeInfos, c.useLBGroups, c.useTemplates) - klog.V(5).Infof("Built service %s LB cluster-wide configs %#v", key, clusterConfigs) - klog.V(5).Infof("Built service %s LB per-node configs %#v", key, perNodeConfigs) - klog.V(5).Infof("Built service %s LB template configs %#v", key, templateConfigs) + perNodeConfigs, templateConfigs, clusterConfigs := buildServiceLBConfigs(service, endpointSlices, c.nodeInfos, c.useLBGroups, c.useTemplates, c.netInfo.GetNetworkName()) + klog.V(5).Infof("Built service %s LB cluster-wide configs for network=%s: %#v", key, c.netInfo.GetNetworkName(), clusterConfigs) + klog.V(5).Infof("Built service %s LB per-node configs for network=%s: %#v", key, c.netInfo.GetNetworkName(), perNodeConfigs) + klog.V(5).Infof("Built service %s LB template configs for network=%s: %#v", key, c.netInfo.GetNetworkName(), templateConfigs) // Convert the LB configs in to load-balancer objects - clusterLBs := buildClusterLBs(service, clusterConfigs, c.nodeInfos, c.useLBGroups) - templateLBs := buildTemplateLBs(service, templateConfigs, c.nodeInfos, c.nodeIPv4Templates, c.nodeIPv6Templates) - perNodeLBs := buildPerNodeLBs(service, perNodeConfigs, c.nodeInfos) - klog.V(5).Infof("Built service %s cluster-wide LB %#v", key, clusterLBs) - klog.V(5).Infof("Built service %s per-node LB %#v", key, perNodeLBs) - klog.V(5).Infof("Built service %s template LB %#v", key, templateLBs) - klog.V(5).Infof("Service %s has %d cluster-wide, %d per-node configs, %d template configs, making %d (cluster) %d (per node) and %d (template) load balancers", - key, len(clusterConfigs), len(perNodeConfigs), len(templateConfigs), + clusterLBs := buildClusterLBs(service, clusterConfigs, c.nodeInfos, c.useLBGroups, c.netInfo) + templateLBs := buildTemplateLBs(service, templateConfigs, c.nodeInfos, c.nodeIPv4Templates, c.nodeIPv6Templates, c.netInfo) + perNodeLBs := buildPerNodeLBs(service, perNodeConfigs, c.nodeInfos, c.netInfo) + klog.V(5).Infof("Built service %s cluster-wide LB for network=%s: %#v", key, c.netInfo.GetNetworkName(), clusterLBs) + klog.V(5).Infof("Built service %s per-node LB for network=%s: %#v", key, c.netInfo.GetNetworkName(), perNodeLBs) + klog.V(5).Infof("Built service %s template LB for network=%s: %#v", key, c.netInfo.GetNetworkName(), templateLBs) + klog.V(5).Infof("Service %s for network=%s has %d cluster-wide, %d per-node configs, %d template configs, making %d (cluster) %d (per node) and %d (template) load balancers", + key, c.netInfo.GetNetworkName(), len(clusterConfigs), len(perNodeConfigs), len(templateConfigs), len(clusterLBs), len(perNodeLBs), len(templateLBs)) lbs := append(clusterLBs, templateLBs...) lbs = append(lbs, perNodeLBs...) 
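The `alreadyApplied` comparison in the hunk that follows is what turns most resyncs into no-ops. A minimal, self-contained sketch of that pattern; the `lb` type, `sync` helper, and `reflect.DeepEqual` comparison here are illustrative stand-ins for the controller's `LB` type, `syncService`, and `LoadBalancersEqualNoUUID`:

```go
package main

import (
	"fmt"
	"reflect"
)

// lb is a hypothetical stand-in for the controller's LB type; the real
// comparison helper (LoadBalancersEqualNoUUID) ignores server-assigned UUIDs.
type lb struct {
	Name string
	VIPs map[string]string
}

// alreadyApplied caches, per service key, the LBs last written to OVN.
var alreadyApplied = map[string][]lb{}

// sync applies the desired LBs only when they differ from the cached state,
// mirroring the skip-no-op branch in syncService.
func sync(key string, desired []lb, apply func([]lb) error) error {
	if existing, ok := alreadyApplied[key]; ok && reflect.DeepEqual(existing, desired) {
		fmt.Printf("skipping no-op change for service %s\n", key)
		return nil
	}
	if err := apply(desired); err != nil {
		return err // the service is re-queued and retried; cache stays stale on purpose
	}
	alreadyApplied[key] = desired
	return nil
}

func main() {
	apply := func([]lb) error { fmt.Println("transacting with OVN"); return nil }
	desired := []lb{{
		Name: "Service_testns/foo_TCP_cluster",
		VIPs: map[string]string{"192.168.1.1:80": "10.0.244.3:8080"},
	}}
	_ = sync("testns/foo", desired, apply) // first call: applies
	_ = sync("testns/foo", desired, apply) // second call: skipped as a no-op
}
```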
@@ -441,15 +462,15 @@ func (c *Controller) syncService(key string) error { c.alreadyAppliedRWLock.RUnlock() if alreadyAppliedKeyExists && LoadBalancersEqualNoUUID(existingLBs, lbs) { - klog.V(5).Infof("Skipping no-op change for service %s", key) + klog.V(5).Infof("Skipping no-op change for service %s for network=%s", key, c.netInfo.GetNetworkName()) } else { - klog.V(5).Infof("Services do not match, existing lbs: %#v, built lbs: %#v", existingLBs, lbs) + klog.V(5).Infof("Services do not match for network=%s, existing lbs: %#v, built lbs: %#v", c.netInfo.GetNetworkName(), existingLBs, lbs) // Actually apply load-balancers to OVN. // // Note: this may fail if a node was deleted between listing nodes and applying. // If so, this will fail and we will resync. - if err := EnsureLBs(c.nbClient, service, existingLBs, lbs); err != nil { - return fmt.Errorf("failed to ensure service %s load balancers: %w", key, err) + if err := EnsureLBs(c.nbClient, service, existingLBs, lbs, c.netInfo); err != nil { + return fmt.Errorf("failed to ensure service %s load balancers for network=%s: %w", key, c.netInfo.GetNetworkName(), err) } c.alreadyAppliedRWLock.Lock() @@ -482,8 +503,8 @@ func (c *Controller) syncNodeInfos(nodeInfos []nodeInfo) { if globalconfig.IPv4Mode { ips, err := util.MatchIPFamily(false, nodeInfo.hostAddresses) if err != nil { - klog.Warningf("Error while searching for IPv4 host addresses in %v for node[%s] : %v", - nodeInfo.hostAddresses, nodeInfo.name, err) + klog.Warningf("Error while searching for IPv4 host addresses in %v for node[%s] for network=%s: %v", + nodeInfo.hostAddresses, nodeInfo.name, c.netInfo.GetNetworkName(), err) continue } @@ -495,8 +516,8 @@ func (c *Controller) syncNodeInfos(nodeInfos []nodeInfo) { if globalconfig.IPv6Mode { ips, err := util.MatchIPFamily(true, nodeInfo.hostAddresses) if err != nil { - klog.Warningf("Error while searching for IPv6 host addresses in %v for node[%s] : %v", - nodeInfo.hostAddresses, nodeInfo.name, err) + klog.Warningf("Error while searching for IPv6 host addresses in %v for node[%s] for network=%s: %v", + nodeInfo.hostAddresses, nodeInfo.name, c.netInfo.GetNetworkName(), err) continue } @@ -512,14 +533,14 @@ func (c *Controller) syncNodeInfos(nodeInfos []nodeInfo) { c.nodeIPv6Templates.AsTemplateMap(), } if err := svcCreateOrUpdateTemplateVar(c.nbClient, nodeIPTemplates); err != nil { - klog.Errorf("Could not sync node IP templates") + klog.Errorf("Could not sync node IP templates for network=%s", c.netInfo.GetNetworkName()) return } } // RequestFullSync re-syncs every service that currently exists func (c *Controller) RequestFullSync(nodeInfos []nodeInfo) { - klog.Info("Full service sync requested") + klog.Infof("Full service sync requested for network=%s", c.netInfo.GetNetworkName()) // Resync node infos and node IP templates. c.syncNodeInfos(nodeInfos) @@ -532,7 +553,7 @@ func (c *Controller) RequestFullSync(nodeInfos []nodeInfo) { if c.startupDone { services, err := c.serviceLister.List(labels.Everything()) if err != nil { - klog.Errorf("Cached lister failed!? %v", err) + klog.Errorf("Cached lister failed (network=%s)!? %v", c.netInfo.GetNetworkName(), err) return } @@ -544,16 +565,47 @@ func (c *Controller) RequestFullSync(nodeInfos []nodeInfo) { // handlers +// skipService is used when UDN is enabled to know which services are to be skipped because they don't +// belong to the network that this service controller is responsible for. 
+func (c *Controller) skipService(name, namespace string) bool { + if util.IsNetworkSegmentationSupportEnabled() { + serviceNetwork, err := c.nadController.GetActiveNetworkForNamespace(namespace) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to retrieve network for service %s/%s: %w", + namespace, name, err)) + return true + } + + // Do not skip default network services enabled for UDN + if serviceNetwork.IsDefault() && + c.netInfo.IsPrimaryNetwork() && + globalconfig.Gateway.Mode == globalconfig.GatewayModeShared && + util.IsUDNEnabledService(ktypes.NamespacedName{Namespace: namespace, Name: name}.String()) { + return false + } + + if serviceNetwork.GetNetworkName() != c.netInfo.GetNetworkName() { + return true + } + } + + return false +} + // onServiceAdd queues the Service for processing. func (c *Controller) onServiceAdd(obj interface{}) { key, err := cache.MetaNamespaceKeyFunc(obj) if err != nil { - utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) + utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v for network=%s: %v", obj, c.netInfo.GetNetworkName(), err)) return } - klog.V(5).Infof("Adding service %s", key) + service := obj.(*v1.Service) + if c.skipService(service.Name, service.Namespace) { + return + } metrics.GetConfigDurationRecorder().Start("service", service.Namespace, service.Name) + klog.V(5).Infof("Adding service %s for network=%s", key, c.netInfo.GetNetworkName()) c.queue.Add(key) } @@ -570,6 +622,10 @@ func (c *Controller) onServiceUpdate(oldObj, newObj interface{}) { key, err := cache.MetaNamespaceKeyFunc(newObj) if err == nil { + if c.skipService(newService.Name, newService.Namespace) { + return + } + metrics.GetConfigDurationRecorder().Start("service", newService.Namespace, newService.Name) c.queue.Add(key) } @@ -582,8 +638,13 @@ func (c *Controller) onServiceDelete(obj interface{}) { utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) return } - klog.V(4).Infof("Deleting service %s", key) + klog.V(4).Infof("Deleting service %s for network=%s", key, c.netInfo.GetNetworkName()) service := obj.(*v1.Service) + + if c.skipService(service.Name, service.Namespace) { + return + } + metrics.GetConfigDurationRecorder().Start("service", service.Namespace, service.Name) c.queue.Add(key) } @@ -637,41 +698,143 @@ func (c *Controller) onEndpointSliceDelete(obj interface{}) { // queueServiceForEndpointSlice attempts to queue the corresponding Service for // the provided EndpointSlice. func (c *Controller) queueServiceForEndpointSlice(endpointSlice *discovery.EndpointSlice) { - key, err := ServiceControllerKey(endpointSlice) + serviceNamespacedName, err := c.getServiceNamespacedNameFromEndpointSlice(endpointSlice) if err != nil { // Do not log endpointsSlices missing service labels as errors. // Once the service label is eventually added, we will get this event // and re-process. 
if errors.Is(err, NoServiceLabelError) { - klog.V(5).Infof(err.Error()) + klog.V(5).Infof("network=%s, error=%s", c.netInfo.GetNetworkName(), err.Error()) } else { - utilruntime.HandleError(fmt.Errorf("couldn't get key for EndpointSlice %+v: %v", endpointSlice, err)) + utilruntime.HandleError(fmt.Errorf("network=%s, couldn't get key for EndpointSlice %+v: %v", c.netInfo.GetNetworkName(), endpointSlice, err)) } return } - c.queue.Add(key) + if c.skipService(serviceNamespacedName.Name, serviceNamespacedName.Namespace) { + return + } + c.queue.Add(serviceNamespacedName.String()) } -// serviceControllerKey returns a controller key for a Service but derived from +// GetServiceKeyFromEndpointSliceForDefaultNetwork returns a controller key for a Service but derived from // an EndpointSlice. -func ServiceControllerKey(endpointSlice *discovery.EndpointSlice) (string, error) { +// Not UDN-aware, is used for egress services +func GetServiceKeyFromEndpointSliceForDefaultNetwork(endpointSlice *discovery.EndpointSlice) (string, error) { + var key string + nsn, err := _getServiceNameFromEndpointSlice(endpointSlice, true) + if err == nil { + key = nsn.String() + } + return key, err +} + +func (c *Controller) getServiceNamespacedNameFromEndpointSlice(endpointSlice *discovery.EndpointSlice) (ktypes.NamespacedName, error) { + if c.netInfo.IsDefault() { + return _getServiceNameFromEndpointSlice(endpointSlice, true) + } else { + return _getServiceNameFromEndpointSlice(endpointSlice, false) + } +} + +func (c *Controller) cleanupUDNEnabledServiceRoute(key string) error { + klog.Infof("Removing UDN enabled service route for service %s in network: %s", key, c.netInfo.GetNetworkName()) + delPredicate := func(route *nbdb.LogicalRouterStaticRoute) bool { + return route.ExternalIDs[types.NetworkExternalID] == c.netInfo.GetNetworkName() && + route.ExternalIDs[types.TopologyExternalID] == c.netInfo.TopologyType() && + route.ExternalIDs[types.UDNEnabledServiceExternalID] == key + } + + var ops []libovsdb.Operation + var err error + if c.netInfo.TopologyType() == types.Layer2Topology { + for _, node := range c.nodeInfos { + if ops, err = libovsdbops.DeleteLogicalRouterStaticRoutesWithPredicateOps(c.nbClient, ops, c.netInfo.GetNetworkScopedGWRouterName(node.name), delPredicate); err != nil { + return err + } + } + } else { + if ops, err = libovsdbops.DeleteLogicalRouterStaticRoutesWithPredicateOps(c.nbClient, ops, c.netInfo.GetNetworkScopedClusterRouterName(), delPredicate); err != nil { + return err + } + } + _, err = libovsdbops.TransactAndCheck(c.nbClient, ops) + return err +} + +func (c *Controller) configureUDNEnabledServiceRoute(service *v1.Service) error { + klog.Infof("Configuring UDN enabled service route for service %s/%s in network: %s", service.Namespace, service.Name, c.netInfo.GetNetworkName()) + + extIDs := map[string]string{ + types.NetworkExternalID: c.netInfo.GetNetworkName(), + types.TopologyExternalID: c.netInfo.TopologyType(), + types.UDNEnabledServiceExternalID: ktypes.NamespacedName{Namespace: service.Namespace, Name: service.Name}.String(), + } + routesEqual := func(a, b *nbdb.LogicalRouterStaticRoute) bool { + return a.IPPrefix == b.IPPrefix && + a.ExternalIDs[types.NetworkExternalID] == b.ExternalIDs[types.NetworkExternalID] && + a.ExternalIDs[types.TopologyExternalID] == b.ExternalIDs[types.TopologyExternalID] && + a.ExternalIDs[types.UDNEnabledServiceExternalID] == b.ExternalIDs[types.UDNEnabledServiceExternalID] && + libovsdbops.PolicyEqualPredicate(a.Policy, b.Policy) && + a.Nexthop == 
b.Nexthop
+	}
+	var ops []libovsdb.Operation
+	for _, nodeInfo := range c.nodeInfos {
+		var mgmtPortIPs []net.IP
+		for _, subnet := range nodeInfo.podSubnets {
+			mgmtPortIPs = append(mgmtPortIPs, util.GetNodeManagementIfAddr(&subnet).IP)
+		}
+		mgmtIP, err := util.MatchFirstIPFamily(utilnet.IsIPv6String(service.Spec.ClusterIP), mgmtPortIPs)
+		if err != nil {
+			return err
+		}
+		staticRoute := nbdb.LogicalRouterStaticRoute{
+			Policy:      &nbdb.LogicalRouterStaticRoutePolicyDstIP,
+			IPPrefix:    service.Spec.ClusterIP,
+			Nexthop:     mgmtIP.String(),
+			ExternalIDs: extIDs,
+		}
+		routerName := c.netInfo.GetNetworkScopedClusterRouterName()
+		if c.netInfo.TopologyType() == types.Layer2Topology {
+			routerName = nodeInfo.gatewayRouterName
+		}
+		// accumulate into ops so the routes built for earlier nodes survive the loop
+		ops, err = libovsdbops.CreateOrUpdateLogicalRouterStaticRoutesWithPredicateOps(c.nbClient, ops, routerName, &staticRoute, func(item *nbdb.LogicalRouterStaticRoute) bool {
+			return routesEqual(item, &staticRoute)
+		})
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err := libovsdbops.TransactAndCheck(c.nbClient, ops)
+	return err
+}
+
+func _getServiceNameFromEndpointSlice(endpointSlice *discovery.EndpointSlice, inDefaultNetwork bool) (ktypes.NamespacedName, error) {
 	if endpointSlice == nil {
-		return "", fmt.Errorf("nil EndpointSlice passed to serviceControllerKey()")
+		return ktypes.NamespacedName{}, fmt.Errorf("nil EndpointSlice passed to _getServiceNameFromEndpointSlice()")
 	}
-	serviceName, ok := endpointSlice.Labels[discovery.LabelServiceName]
+
+	label := discovery.LabelServiceName
+	errTemplate := NoServiceLabelError
+	if !inDefaultNetwork {
+		label = types.LabelUserDefinedServiceName
+	}
+
+	serviceName, ok := endpointSlice.Labels[label]
 	if !ok || serviceName == "" {
-		return "", fmt.Errorf("%w: endpointSlice: %s/%s", NoServiceLabelError, endpointSlice.Namespace,
-			endpointSlice.Name)
+		return ktypes.NamespacedName{}, fmt.Errorf("%w: endpointSlice: %s/%s",
+			errTemplate, endpointSlice.Namespace, endpointSlice.Name)
 	}
-	return fmt.Sprintf("%s/%s", endpointSlice.Namespace, serviceName), nil
+	return ktypes.NamespacedName{Namespace: endpointSlice.Namespace, Name: serviceName}, nil
 }
 
 // newRateLimiter makes a queue rate limiter. This limits re-queues somewhat more significantly than base qps.
 // the client-go default qps is 10, but this is low for our level of scale.
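 // The limiter below takes the max of a per-item exponential backoff
 // (5 ms base, capped at 1000 s) and a shared token bucket (qps sustained,
 // burst of qps*5), so a flapping service backs off on its own while
 // overall queue throughput stays bounded.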
-func newRatelimiter(qps int) workqueue.RateLimiter { - return workqueue.NewMaxOfRateLimiter( - workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(qps), qps*5)}, +func newRatelimiter(qps int) workqueue.TypedRateLimiter[string] { + return workqueue.NewTypedMaxOfRateLimiter( + workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, 1000*time.Second), + &workqueue.TypedBucketRateLimiter[string]{Limiter: rate.NewLimiter(rate.Limit(qps), qps*5)}, ) } diff --git a/go-controller/pkg/ovn/controller/services/services_controller_test.go b/go-controller/pkg/ovn/controller/services/services_controller_test.go index 8ae9856f48..3e82cd40ba 100644 --- a/go-controller/pkg/ovn/controller/services/services_controller_test.go +++ b/go-controller/pkg/ovn/controller/services/services_controller_test.go @@ -7,35 +7,42 @@ import ( "strings" "testing" - "github.com/onsi/ginkgo" + cnitypes "github.com/containernetworking/cni/pkg/types" "github.com/onsi/gomega" "github.com/onsi/gomega/format" libovsdbclient "github.com/ovn-org/libovsdb/client" - globalconfig "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" - libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" - kube_test "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" - libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "golang.org/x/exp/maps" v1 "k8s.io/api/core/v1" discovery "k8s.io/api/discovery/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" + utilnet "k8s.io/utils/net" utilpointer "k8s.io/utils/pointer" + + ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + globalconfig "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + networkAttachDefController "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" + kubetest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" + libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/nad" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) -var alwaysReady = func() bool { return true } -var FakeGRs = "GR_1 GR_2" -var initialLsGroups []string = []string{types.ClusterLBGroupName, types.ClusterSwitchLBGroupName} -var initialLrGroups []string = []string{types.ClusterLBGroupName, types.ClusterRouterLBGroupName} +var ( + nodeA = "node-a" + nodeB = "node-b" -var outport int32 = int32(3456) -var tcp v1.Protocol = v1.ProtocolTCP + tcp = v1.ProtocolTCP + udp = v1.ProtocolUDP +) type serviceController struct { *Controller @@ -44,19 +51,27 @@ type serviceController struct { libovsdbCleanup *libovsdbtest.Context } -func newController() (*serviceController, error) { - return newControllerWithDBSetup(libovsdbtest.TestSetup{}) -} - -func 
newControllerWithDBSetup(dbSetup libovsdbtest.TestSetup) (*serviceController, error) { - gomega.RegisterFailHandler(ginkgo.Fail) +func newControllerWithDBSetupForNetwork(dbSetup libovsdbtest.TestSetup, netInfo util.NetInfo, nadNamespace string) (*serviceController, error) { nbClient, cleanup, err := libovsdbtest.NewNBTestHarness(dbSetup, nil) + + if err != nil { + return nil, err + } + + config.OVNKubernetesFeature.EnableInterconnect = true + config.OVNKubernetesFeature.EnableMultiNetwork = true + config.OVNKubernetesFeature.EnableNetworkSegmentation = true + + client := util.GetOVNClientset().GetOVNKubeControllerClientset() + + factoryMock, err := factory.NewOVNKubeControllerWatchFactory(client) if err != nil { return nil, err } - client := fake.NewSimpleClientset() - informerFactory := informers.NewSharedInformerFactory(client, 0) + if err = factoryMock.Start(); err != nil { + return nil, err + } recorder := record.NewFakeRecorder(10) nbZoneFailed := false @@ -66,34 +81,54 @@ func newControllerWithDBSetup(dbSetup libovsdbtest.TestSetup) (*serviceControlle _, err = libovsdbutil.GetNBZone(nbClient) if err != nil { nbZoneFailed = true - err = createTestNBGlobal(nbClient, "global") - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if err = createTestNBGlobal(nbClient, "global"); err != nil { + return nil, err + } + } + testNCM := &nad.FakeNetworkControllerManager{} + nadController, err := networkAttachDefController.NewNetAttachDefinitionController("test", testNCM, factoryMock, nil) + if err != nil { + return nil, err } - controller, err := NewController(client, + controller, err := NewController(client.KubeClient, nbClient, - informerFactory.Core().V1().Services(), - informerFactory.Discovery().V1().EndpointSlices(), - informerFactory.Core().V1().Nodes(), + factoryMock.ServiceCoreInformer(), + factoryMock.EndpointSliceCoreInformer(), + factoryMock.NodeCoreInformer(), + nadController, recorder, + netInfo, ) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) + if err != nil { + return nil, err + } if nbZoneFailed { // Delete the NBGlobal row as this function created it. Otherwise many tests would fail while // checking the expectedData in the NBDB. 
- err = deleteTestNBGlobal(nbClient, "global") - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if err = deleteTestNBGlobal(nbClient, "global"); err != nil { + return nil, err + } } - controller.initTopLevelCache() + if err = controller.initTopLevelCache(); err != nil { + return nil, err + } controller.useLBGroups = true controller.useTemplates = true + // When testing services on UDN, add a NAD in the same namespace associated to the service + if !netInfo.IsDefault() { + if err = addSampleNAD(client, nadNamespace, netInfo); err != nil { + return nil, err + } + } + return &serviceController{ controller, - informerFactory.Core().V1().Services().Informer().GetStore(), - informerFactory.Discovery().V1().EndpointSlices().Informer().GetStore(), + factoryMock.ServiceCoreInformer().Informer().GetStore(), + factoryMock.EndpointSliceInformer().GetStore(), cleanup, }, nil } @@ -102,210 +137,534 @@ func (c *serviceController) close() { c.libovsdbCleanup.Cleanup() } +func getSampleUDNNetInfo(namespace string, topology string) (util.NetInfo, error) { + // requires that config.IPv4Mode = true + subnets := "192.168.200.0/16" + if topology == types.Layer3Topology { + subnets += "/24" + } + netInfo, err := util.NewNetInfo(&ovncnitypes.NetConf{ + Topology: topology, + NADName: fmt.Sprintf("%s/nad1", namespace), + MTU: 1400, + Role: "primary", + Subnets: subnets, + NetConf: cnitypes.NetConf{Name: fmt.Sprintf("net_%s", topology), Type: "ovn-k8s-cni-overlay"}, + JoinSubnet: "100.66.0.0/16", + }) + return netInfo, err +} + +func addSampleNAD(client *util.OVNKubeControllerClientset, namespace string, netInfo util.NetInfo) error { + _, err := client.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(namespace).Create( + context.TODO(), + kubetest.GenerateNAD(netInfo.GetNetworkName(), netInfo.GetNetworkName(), namespace, netInfo.TopologyType(), netInfo.Subnets()[0].String(), types.NetworkRolePrimary), + metav1.CreateOptions{}) + return err +} + // TestSyncServices - an end-to-end test for the services controller. 
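The two helpers above are what let a single table of test cases run against user-defined networks: `getSampleUDNNetInfo` builds a primary-role `NetInfo` (note the `192.168.200.0/16/24` form for layer3, where the trailing `/24` is the per-node host-subnet length; layer2 has no per-node split, so the bare `/16` is used), and `addSampleNAD` publishes a matching NetworkAttachmentDefinition in the service's namespace. A sketch of how a test body might consume them, assuming it sits in the same package as the helpers (the expected values follow from the `NetConf` fields set above):

```go
func TestSampleUDNHelpers(t *testing.T) {
	// getSampleUDNNetInfo requires IPv4 mode, per the comment in the helper.
	globalconfig.IPv4Mode = true
	defer func() { globalconfig.IPv4Mode = false }()

	l3, err := getSampleUDNNetInfo("testns", "layer3")
	if err != nil {
		t.Fatalf("building layer3 NetInfo: %v", err)
	}
	// Name and topology come straight from the NetConf built by the helper.
	if l3.GetNetworkName() != "net_layer3" || l3.IsDefault() {
		t.Fatalf("unexpected network identity: %s", l3.GetNetworkName())
	}
	// "192.168.200.0/16/24" parses into a /16 cluster subnet with /24
	// host subnets carved out per node.
	t.Logf("subnet: %s, topology: %s", l3.Subnets()[0].String(), l3.TopologyType())
}
```

TestSyncServices, which follows, threads these `NetInfo` values through `newControllerWithDBSetupForNetwork` so each table case can assert per-network NBDB state.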
func TestSyncServices(t *testing.T) { + // setup gomega parameters initialMaxLength := format.MaxLength temporarilyEnableGomegaMaxLengthFormat() t.Cleanup(func() { restoreGomegaMaxLengthFormat(initialMaxLength) }) - ns := "testns" - serviceName := "foo" + // define test constants + const ( + nodeAEndpoint = "10.128.0.2" + nodeAEndpoint2 = "10.128.0.3" + nodeAEndpointV6 = "fe00::5555:0:0:2" + nodeAEndpoint2V6 = "fe00::5555:0:0:3" + + nodeBEndpointIP = "10.128.1.2" + + nodeAHostAddress = "10.0.0.1" + nodeBHostAddress = "10.0.0.2" + nodePort = 8989 + + // the IPs below are only used in one test + nodeAHostAddress2 = "10.2.2.2" + nodeAHostAddress3 = "10.3.3.3" + nodeAHostAddressV6 = "fd00::1:0:0:1" + nodeAHostAddress2V6 = "fd00::2:0:0:2" + ) + + var ( + ns = "testns" + serviceName = "foo" + + serviceClusterIP = "192.168.1.1" + serviceClusterIPv6 = "fd00::7777:0:0:1" + servicePort = int32(80) + outPort = int32(3456) + + initialLsGroups = []string{types.ClusterLBGroupName, types.ClusterSwitchLBGroupName} + initialLrGroups = []string{types.ClusterLBGroupName, types.ClusterRouterLBGroupName} + ) + // setup global config oldGateway := globalconfig.Gateway.Mode oldClusterSubnet := globalconfig.Default.ClusterSubnets globalconfig.Kubernetes.OVNEmptyLbEvents = true globalconfig.IPv4Mode = true + globalconfig.IPv6Mode = true defer func() { globalconfig.Kubernetes.OVNEmptyLbEvents = false globalconfig.IPv4Mode = false + globalconfig.IPv6Mode = false globalconfig.Gateway.Mode = oldGateway globalconfig.Default.ClusterSubnets = oldClusterSubnet }() _, cidr4, _ := net.ParseCIDR("10.128.0.0/16") - _, cidr6, _ := net.ParseCIDR("fe00::/64") + _, cidr6, _ := net.ParseCIDR("fe00:0:0:0:5555::0/64") globalconfig.Default.ClusterSubnets = []globalconfig.CIDRNetworkEntry{{cidr4, 26}, {cidr6, 26}} - var ( - nodeA = "node-a" - nodeB = "node-b" - ) - const ( - nodeAEndpointIP = "10.128.0.2" - nodeBEndpointIP = "10.128.1.2" - nodeAHostIP = "10.0.0.1" - nodeBHostIP = "10.0.0.2" - ) - firstNode := nodeConfig(nodeA, nodeAHostIP) - secondNode := nodeConfig(nodeB, nodeBHostIP) - defaultNodes := map[string]nodeInfo{ - nodeA: *firstNode, - nodeB: *secondNode, + l3UDN, err := getSampleUDNNetInfo(ns, "layer3") + if err != nil { + t.Fatalf("Error creating UDNNetInfo: %v", err) + } + l2UDN, err := getSampleUDNNetInfo(ns, "layer2") + if err != nil { + t.Fatalf("Error creating UDNNetInfo: %v", err) } + // define node configs + nodeAInfo := getNodeInfo(nodeA, []string{nodeAHostAddress}, nil) + nodeBInfo := getNodeInfo(nodeB, []string{nodeBHostAddress}, nil) - const nodePort = 8989 + nodeAMultiAddressesV4 := []string{nodeAHostAddress, nodeAHostAddress2, nodeAHostAddress3} + nodeAMultiAddressesV6 := []string{nodeAHostAddressV6, nodeAHostAddress2V6} - tests := []struct { - name string - slice *discovery.EndpointSlice - service *v1.Service + nodeAInfoMultiIP := getNodeInfo(nodeA, nodeAMultiAddressesV4, nodeAMultiAddressesV6) + + type networkData struct { + netInfo util.NetInfo initialDb []libovsdbtest.TestData expectedDb []libovsdbtest.TestData - gatewayMode string - nodeToDelete *nodeInfo dbStateAfterDeleting []libovsdbtest.TestData + } + tests := []struct { + name string + nodeAInfo *nodeInfo + nodeBInfo *nodeInfo + enableIPv6 bool + slices []discovery.EndpointSlice + service *v1.Service + networks []networkData + gatewayMode string + nodeToDelete string }{ { - name: "create service from Single Stack Service without endpoints", - slice: &discovery.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName + "ab23", - Namespace: 
ns, - Labels: map[string]string{discovery.LabelServiceName: serviceName}, + name: "create service from Single Stack Service without endpoints", + nodeAInfo: nodeAInfo, + nodeBInfo: nodeBInfo, + slices: []discovery.EndpointSlice{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName + "ab23", + Namespace: ns, + Labels: map[string]string{discovery.LabelServiceName: serviceName}, + }, + Ports: []discovery.EndpointPort{}, + AddressType: discovery.AddressTypeIPv4, + Endpoints: []discovery.Endpoint{}, }, - Ports: []discovery.EndpointPort{}, - AddressType: discovery.AddressTypeIPv4, - Endpoints: []discovery.Endpoint{}, }, service: &v1.Service{ ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: ns}, Spec: v1.ServiceSpec{ Type: v1.ServiceTypeClusterIP, - ClusterIP: "192.168.1.1", - ClusterIPs: []string{"192.168.1.1"}, + ClusterIP: serviceClusterIP, + ClusterIPs: []string{serviceClusterIP}, Selector: map[string]string{"foo": "bar"}, Ports: []v1.ServicePort{{ - Port: 80, + Port: servicePort, Protocol: v1.ProtocolTCP, - TargetPort: intstr.FromInt(3456), + TargetPort: intstr.FromInt32(outPort), }}, }, }, - initialDb: []libovsdbtest.TestData{ - nodeLogicalSwitch(nodeA, initialLsGroups), - nodeLogicalSwitch(nodeB, initialLsGroups), - nodeLogicalRouter(nodeA, initialLrGroups), - nodeLogicalRouter(nodeB, initialLrGroups), - lbGroup(types.ClusterLBGroupName), - lbGroup(types.ClusterSwitchLBGroupName), - lbGroup(types.ClusterRouterLBGroupName), - }, - expectedDb: []libovsdbtest.TestData{ - &nbdb.LoadBalancer{ - UUID: loadBalancerClusterWideTCPServiceName(ns, serviceName), - Name: loadBalancerClusterWideTCPServiceName(ns, serviceName), - Options: servicesOptions(), - Protocol: &nbdb.LoadBalancerProtocolTCP, - Vips: map[string]string{ - "192.168.1.1:80": "", - }, - ExternalIDs: serviceExternalIDs(namespacedServiceName(ns, serviceName)), + networks: []networkData{ + { + netInfo: &util.DefaultNetInfo{}, + initialDb: []libovsdbtest.TestData{ + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + }, + expectedDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: loadBalancerClusterWideTCPServiceName(ns, serviceName), + Name: loadBalancerClusterWideTCPServiceName(ns, serviceName), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + IPAndPort(serviceClusterIP, servicePort): "", + }, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(ns, serviceName)), + }, + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + lbGroup(types.ClusterLBGroupName, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + nodeIPTemplate(nodeAInfo), + nodeIPTemplate(nodeBInfo), + }, + }, + { + netInfo: l3UDN, + initialDb: []libovsdbtest.TestData{ + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitchForNetwork(nodeA, initialLsGroups, l3UDN), + nodeLogicalSwitchForNetwork(nodeB, initialLsGroups, l3UDN), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l3UDN), 
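Throughout these expected-DB fixtures, a load balancer's `Vips` map encodes the service frontend and backends as plain `host:port` strings: `IPAndPort(serviceClusterIP, servicePort)` produces the VIP key and `formatEndpoints(outPort, eps...)` the comma-joined backend value. The sketch below is a hypothetical reconstruction of those two helpers, consistent with pairs such as `"192.168.1.1:80": "10.128.0.2:3456,10.128.1.2:3456"` seen in this diff; only the call sites appear in the patch, so the bodies here are assumptions:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// IPAndPort joins an IP and port the way OVN stores LB VIPs; JoinHostPort
// brackets IPv6 addresses, matching keys like "[fd00::7777:0:0:1]:80".
func IPAndPort(ip string, port int32) string {
	return net.JoinHostPort(ip, strconv.Itoa(int(port)))
}

// formatEndpoints renders the backend list for a VIP: each endpoint IP is
// paired with the target port and the results are comma-joined.
func formatEndpoints(outputPort int32, ips ...string) string {
	eps := make([]string, 0, len(ips))
	for _, ip := range ips {
		eps = append(eps, IPAndPort(ip, outputPort))
	}
	return strings.Join(eps, ",")
}

func main() {
	fmt.Println(IPAndPort("192.168.1.1", 80))                      // 192.168.1.1:80
	fmt.Println(formatEndpoints(3456, "10.128.0.2", "10.128.1.2")) // 10.128.0.2:3456,10.128.1.2:3456
}
```

Keeping VIPs as strings mirrors how OVN itself stores the `Load_Balancer.vips` column, so the fixtures that continue below can be compared directly against the test harness dump.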
+ nodeLogicalRouterForNetwork(nodeB, initialLrGroups, l3UDN), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l3UDN), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l3UDN), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l3UDN), + }, + expectedDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN), + Name: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + IPAndPort(serviceClusterIP, servicePort): "", + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l3UDN.GetNetworkName()), + }, + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitchForNetwork(nodeA, initialLsGroups, l3UDN), + nodeLogicalSwitchForNetwork(nodeB, initialLsGroups, l3UDN), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l3UDN), + nodeLogicalRouterForNetwork(nodeB, initialLrGroups, l3UDN), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l3UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN)), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l3UDN), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l3UDN), + + nodeIPTemplate(nodeAInfo), + nodeIPTemplate(nodeBInfo), + }, + }, + { + netInfo: l2UDN, + initialDb: []libovsdbtest.TestData{ + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitchForNetwork("", initialLsGroups, l2UDN), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l2UDN), + nodeLogicalRouterForNetwork(nodeB, initialLrGroups, l2UDN), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l2UDN), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l2UDN), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l2UDN), + }, + expectedDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN), + Name: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + IPAndPort(serviceClusterIP, servicePort): "", + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l2UDN.GetNetworkName()), + }, + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitchForNetwork("", initialLsGroups, l2UDN), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l2UDN), + nodeLogicalRouterForNetwork(nodeB, initialLrGroups, l2UDN), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l2UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, 
serviceName, l2UDN)), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l2UDN), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l2UDN), + + nodeIPTemplate(nodeAInfo), + nodeIPTemplate(nodeBInfo), + }, }, - nodeLogicalSwitch(nodeA, initialLsGroups), - nodeLogicalSwitch(nodeB, initialLsGroups), - nodeLogicalRouter(nodeA, initialLrGroups), - nodeLogicalRouter(nodeB, initialLrGroups), - lbGroup(types.ClusterLBGroupName, loadBalancerClusterWideTCPServiceName(ns, serviceName)), - lbGroup(types.ClusterSwitchLBGroupName), - lbGroup(types.ClusterRouterLBGroupName), - nodeIPTemplate(firstNode), - nodeIPTemplate(secondNode), }, }, { - name: "update service without endpoints", - slice: &discovery.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName + "ab23", - Namespace: ns, - Labels: map[string]string{discovery.LabelServiceName: serviceName}, + name: "update service without endpoints", + nodeAInfo: nodeAInfo, + nodeBInfo: nodeBInfo, + slices: []discovery.EndpointSlice{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName + "ab23", + Namespace: ns, + Labels: map[string]string{discovery.LabelServiceName: serviceName}, + }, + Ports: []discovery.EndpointPort{}, + AddressType: discovery.AddressTypeIPv4, + Endpoints: []discovery.Endpoint{}, }, - Ports: []discovery.EndpointPort{}, - AddressType: discovery.AddressTypeIPv4, - Endpoints: []discovery.Endpoint{}, }, service: &v1.Service{ ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: ns}, Spec: v1.ServiceSpec{ Type: v1.ServiceTypeClusterIP, - ClusterIP: "192.168.1.1", - ClusterIPs: []string{"192.168.1.1"}, + ClusterIP: serviceClusterIP, + ClusterIPs: []string{serviceClusterIP}, Selector: map[string]string{"foo": "bar"}, Ports: []v1.ServicePort{{ - Port: 80, + Port: servicePort, Protocol: v1.ProtocolTCP, - TargetPort: intstr.FromInt(3456), + TargetPort: intstr.FromInt32(outPort), }}, }, }, - initialDb: []libovsdbtest.TestData{ - &nbdb.LoadBalancer{ - UUID: loadBalancerClusterWideTCPServiceName(ns, serviceName), - Name: loadBalancerClusterWideTCPServiceName(ns, serviceName), - Options: servicesOptions(), - Protocol: &nbdb.LoadBalancerProtocolTCP, - Vips: map[string]string{ - "192.168.0.1:6443": "", - }, - ExternalIDs: serviceExternalIDs(namespacedServiceName(ns, serviceName)), + networks: []networkData{ + { + netInfo: &util.DefaultNetInfo{}, + initialDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: loadBalancerClusterWideTCPServiceName(ns, serviceName), + Name: loadBalancerClusterWideTCPServiceName(ns, serviceName), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + "192.168.0.1:6443": "", + }, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(ns, serviceName)), + }, + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitch("wrong-switch", []string{}, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + nodeLogicalRouter(nodeA, initialLrGroups, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + nodeLogicalRouter(nodeB, initialLrGroups, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + nodeLogicalRouter("node-c", []string{}, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + }, + expectedDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: loadBalancerClusterWideTCPServiceName(ns, serviceName), + Name: loadBalancerClusterWideTCPServiceName(ns, 
serviceName), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + IPAndPort(serviceClusterIP, servicePort): "", + }, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(ns, serviceName)), + }, + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitch("wrong-switch", []string{}), + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouter("node-c", []string{}), + lbGroup(types.ClusterLBGroupName, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + nodeIPTemplate(nodeAInfo), + nodeIPTemplate(nodeBInfo), + }, }, - nodeLogicalSwitch(nodeA, initialLsGroups), - nodeLogicalSwitch(nodeB, initialLsGroups), - nodeLogicalSwitch("wrong-switch", []string{}, loadBalancerClusterWideTCPServiceName(ns, serviceName)), - nodeLogicalRouter(nodeA, initialLrGroups, loadBalancerClusterWideTCPServiceName(ns, serviceName)), - nodeLogicalRouter(nodeB, initialLrGroups, loadBalancerClusterWideTCPServiceName(ns, serviceName)), - nodeLogicalRouter("node-c", []string{}, loadBalancerClusterWideTCPServiceName(ns, serviceName)), - lbGroup(types.ClusterLBGroupName), - lbGroup(types.ClusterSwitchLBGroupName), - lbGroup(types.ClusterRouterLBGroupName), - }, - expectedDb: []libovsdbtest.TestData{ - &nbdb.LoadBalancer{ - UUID: loadBalancerClusterWideTCPServiceName(ns, serviceName), - Name: loadBalancerClusterWideTCPServiceName(ns, serviceName), - Options: servicesOptions(), - Protocol: &nbdb.LoadBalancerProtocolTCP, - Vips: map[string]string{ - "192.168.1.1:80": "", - }, - ExternalIDs: serviceExternalIDs(namespacedServiceName(ns, serviceName)), + { + netInfo: l3UDN, + initialDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN), + Name: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + "192.168.0.1:6443": "", + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l3UDN.GetNetworkName()), + }, + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitchForNetwork(nodeA, initialLsGroups, l3UDN), + nodeLogicalSwitchForNetwork(nodeB, initialLsGroups, l3UDN), + nodeLogicalSwitchForNetwork("wrong-switch", []string{}, l3UDN, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l3UDN, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + nodeLogicalRouterForNetwork(nodeB, initialLrGroups, l3UDN, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + nodeLogicalRouterForNetwork("node-c", []string{}, l3UDN, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l3UDN), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l3UDN), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l3UDN), + }, + expectedDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN), + Name: 
clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + IPAndPort(serviceClusterIP, servicePort): "", + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l3UDN.GetNetworkName()), + }, + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitchForNetwork(nodeA, initialLsGroups, l3UDN), + nodeLogicalSwitchForNetwork(nodeB, initialLsGroups, l3UDN), + nodeLogicalSwitchForNetwork("wrong-switch", []string{}, l3UDN), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l3UDN), + nodeLogicalRouterForNetwork(nodeB, initialLrGroups, l3UDN), + nodeLogicalRouterForNetwork("node-c", []string{}, l3UDN), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l3UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN)), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l3UDN), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l3UDN), + + nodeIPTemplate(nodeAInfo), + nodeIPTemplate(nodeBInfo), + }, + }, + { + netInfo: l2UDN, + initialDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN), + Name: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + "192.168.0.1:6443": "", + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l2UDN.GetNetworkName()), + }, + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitchForNetwork("", initialLsGroups, l2UDN), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l2UDN, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + nodeLogicalRouterForNetwork(nodeB, initialLrGroups, l2UDN, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + nodeLogicalRouterForNetwork("node-c", []string{}, l2UDN, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l2UDN), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l2UDN), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l2UDN), + }, + expectedDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN), + Name: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + IPAndPort(serviceClusterIP, servicePort): "", + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l2UDN.GetNetworkName()), + }, + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitchForNetwork("", initialLsGroups, l2UDN), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l2UDN), + 
nodeLogicalRouterForNetwork(nodeB, initialLrGroups, l2UDN), + nodeLogicalRouterForNetwork("node-c", []string{}, l2UDN), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l2UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN)), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l2UDN), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l2UDN), + + nodeIPTemplate(nodeAInfo), + nodeIPTemplate(nodeBInfo), + }, }, - nodeLogicalSwitch(nodeA, initialLsGroups), - nodeLogicalSwitch(nodeB, initialLsGroups), - nodeLogicalSwitch("wrong-switch", []string{}), - nodeLogicalRouter(nodeA, initialLrGroups), - nodeLogicalRouter(nodeB, initialLrGroups), - nodeLogicalRouter("node-c", []string{}), - lbGroup(types.ClusterLBGroupName, loadBalancerClusterWideTCPServiceName(ns, serviceName)), - lbGroup(types.ClusterSwitchLBGroupName), - lbGroup(types.ClusterRouterLBGroupName), - nodeIPTemplate(firstNode), - nodeIPTemplate(secondNode), }, }, { - name: "transition to endpoints, create nodeport", - slice: &discovery.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName + "ab1", - Namespace: ns, - Labels: map[string]string{discovery.LabelServiceName: serviceName}, - }, - Ports: []discovery.EndpointPort{ - { - Protocol: &tcp, - Port: &outport, + name: "transition to endpoints, create nodeport", + nodeAInfo: nodeAInfo, + nodeBInfo: nodeBInfo, + slices: []discovery.EndpointSlice{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName + "ab1", + Namespace: ns, + Labels: map[string]string{discovery.LabelServiceName: serviceName}, }, - }, - AddressType: discovery.AddressTypeIPv4, - Endpoints: []discovery.Endpoint{ - { - Conditions: discovery.EndpointConditions{ - Ready: utilpointer.Bool(true), + Ports: []discovery.EndpointPort{ + { + Protocol: &tcp, + Port: &outPort, + }, + }, + AddressType: discovery.AddressTypeIPv4, + Endpoints: []discovery.Endpoint{ + { + Conditions: discovery.EndpointConditions{ + Ready: utilpointer.Bool(true), + }, + Addresses: []string{nodeAEndpoint}, + NodeName: &nodeA, + }, + { + Conditions: discovery.EndpointConditions{ + Ready: utilpointer.Bool(true), + }, + Addresses: []string{nodeBEndpointIP}, + NodeName: &nodeB, }, - Addresses: []string{"10.128.0.2", "10.128.1.2"}, - NodeName: &nodeA, }, }, }, @@ -313,81 +672,223 @@ func TestSyncServices(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: ns}, Spec: v1.ServiceSpec{ Type: v1.ServiceTypeClusterIP, - ClusterIP: "192.168.1.1", - ClusterIPs: []string{"192.168.1.1"}, + ClusterIP: serviceClusterIP, + ClusterIPs: []string{serviceClusterIP}, Selector: map[string]string{"foo": "bar"}, Ports: []v1.ServicePort{{ - Port: 80, + Port: servicePort, Protocol: v1.ProtocolTCP, - TargetPort: intstr.FromInt(3456), + TargetPort: intstr.FromInt32(outPort), NodePort: nodePort, }}, }, }, - initialDb: []libovsdbtest.TestData{ - &nbdb.LoadBalancer{ - UUID: loadBalancerClusterWideTCPServiceName(ns, serviceName), - Name: loadBalancerClusterWideTCPServiceName(ns, serviceName), - Options: servicesOptions(), - Protocol: &nbdb.LoadBalancerProtocolTCP, - Vips: map[string]string{ - "192.168.0.1:6443": "", - }, - ExternalIDs: serviceExternalIDs(namespacedServiceName(ns, serviceName)), + networks: []networkData{ + { + netInfo: &util.DefaultNetInfo{}, + initialDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: loadBalancerClusterWideTCPServiceName(ns, serviceName), + Name: 
loadBalancerClusterWideTCPServiceName(ns, serviceName), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + "192.168.0.1:6443": "", + }, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(ns, serviceName)), + }, + nodeLogicalSwitch(nodeA, initialLsGroups, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + nodeLogicalSwitch(nodeB, initialLsGroups, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + nodeLogicalRouter(nodeA, initialLrGroups, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + nodeLogicalRouter(nodeB, initialLrGroups, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + }, + expectedDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: loadBalancerClusterWideTCPServiceName(ns, serviceName), + Name: loadBalancerClusterWideTCPServiceName(ns, serviceName), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + IPAndPort(serviceClusterIP, servicePort): formatEndpoints(outPort, nodeAEndpoint, nodeBEndpointIP), + }, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(ns, serviceName)), + }, + nodeMergedTemplateLoadBalancer(nodePort, serviceName, ns, outPort, nodeAEndpoint, nodeBEndpointIP), + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + lbGroup(types.ClusterLBGroupName, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + lbGroup(types.ClusterSwitchLBGroupName, nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv4Protocol)), + lbGroup(types.ClusterRouterLBGroupName, nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv4Protocol)), + nodeIPTemplate(nodeAInfo), + nodeIPTemplate(nodeBInfo), + }, }, - nodeLogicalSwitch(nodeA, initialLsGroups, loadBalancerClusterWideTCPServiceName(ns, serviceName)), - nodeLogicalSwitch(nodeB, initialLsGroups, loadBalancerClusterWideTCPServiceName(ns, serviceName)), - nodeLogicalRouter(nodeA, initialLrGroups, loadBalancerClusterWideTCPServiceName(ns, serviceName)), - nodeLogicalRouter(nodeB, initialLrGroups, loadBalancerClusterWideTCPServiceName(ns, serviceName)), - lbGroup(types.ClusterLBGroupName), - lbGroup(types.ClusterSwitchLBGroupName), - lbGroup(types.ClusterRouterLBGroupName), - }, - expectedDb: []libovsdbtest.TestData{ - &nbdb.LoadBalancer{ - UUID: loadBalancerClusterWideTCPServiceName(ns, serviceName), - Name: loadBalancerClusterWideTCPServiceName(ns, serviceName), - Options: servicesOptions(), - Protocol: &nbdb.LoadBalancerProtocolTCP, - Vips: map[string]string{ - "192.168.1.1:80": "10.128.0.2:3456,10.128.1.2:3456", - }, - ExternalIDs: serviceExternalIDs(namespacedServiceName(ns, serviceName)), + { + netInfo: l3UDN, + initialDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN), + Name: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + "192.168.0.1:6443": "", + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l3UDN.GetNetworkName()), + }, + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitchForNetwork(nodeA, initialLsGroups, l3UDN, 
clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN)), + nodeLogicalSwitchForNetwork(nodeB, initialLsGroups, l3UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN)), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l3UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN)), + nodeLogicalRouterForNetwork(nodeB, initialLrGroups, l3UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN)), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l3UDN), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l3UDN), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l3UDN), + }, + expectedDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN), + Name: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + IPAndPort(serviceClusterIP, servicePort): formatEndpoints(outPort, nodeAEndpoint, nodeBEndpointIP), + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l3UDN.GetNetworkName()), + }, + nodeMergedTemplateLoadBalancerForNetwork(nodePort, serviceName, ns, outPort, l3UDN, nodeAEndpoint, nodeBEndpointIP), + + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitchForNetwork(nodeA, initialLsGroups, l3UDN), + nodeLogicalSwitchForNetwork(nodeB, initialLsGroups, l3UDN), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l3UDN), + nodeLogicalRouterForNetwork(nodeB, initialLrGroups, l3UDN), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l3UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN)), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l3UDN, nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l3UDN)), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l3UDN, nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l3UDN)), + + nodeIPTemplate(nodeAInfo), + nodeIPTemplate(nodeBInfo), + }, + }, + { + netInfo: l2UDN, + initialDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN), + Name: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + "192.168.0.1:6443": "", + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l2UDN.GetNetworkName()), + }, + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitchForNetwork("", initialLsGroups, l2UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN)), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l2UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN)), + 
nodeLogicalRouterForNetwork(nodeB, initialLrGroups, l2UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN)), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l2UDN), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l2UDN), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l2UDN), + }, + expectedDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN), + Name: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + IPAndPort(serviceClusterIP, servicePort): formatEndpoints(outPort, nodeAEndpoint, nodeBEndpointIP), + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l2UDN.GetNetworkName()), + }, + nodeMergedTemplateLoadBalancerForNetwork(nodePort, serviceName, ns, outPort, l2UDN, nodeAEndpoint, nodeBEndpointIP), + + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitchForNetwork("", initialLsGroups, l2UDN), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l2UDN), + nodeLogicalRouterForNetwork(nodeB, initialLrGroups, l2UDN), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l2UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN)), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l2UDN, nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l2UDN)), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l2UDN, nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l2UDN)), + + nodeIPTemplate(nodeAInfo), + nodeIPTemplate(nodeBInfo), + }, }, - nodeMergedTemplateLoadBalancer(nodePort, serviceName, ns, outport, nodeAEndpointIP, nodeBEndpointIP), - nodeLogicalSwitch(nodeA, initialLsGroups), - nodeLogicalSwitch(nodeB, initialLsGroups), - nodeLogicalRouter(nodeA, initialLrGroups), - nodeLogicalRouter(nodeB, initialLrGroups), - lbGroup(types.ClusterLBGroupName, loadBalancerClusterWideTCPServiceName(ns, serviceName)), - lbGroup(types.ClusterSwitchLBGroupName, nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv4Protocol)), - lbGroup(types.ClusterRouterLBGroupName, nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv4Protocol)), - nodeIPTemplate(firstNode), - nodeIPTemplate(secondNode), }, }, { - name: "deleting a node should not leave stale load balancers", - slice: &discovery.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName + "ab1", - Namespace: ns, - Labels: map[string]string{discovery.LabelServiceName: serviceName}, - }, - Ports: []discovery.EndpointPort{ - { - Protocol: &tcp, - Port: &outport, + name: "deleting a node should not leave stale load balancers", + nodeAInfo: nodeAInfo, + nodeBInfo: nodeBInfo, + slices: []discovery.EndpointSlice{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName + "ab1", + Namespace: ns, + Labels: map[string]string{discovery.LabelServiceName: serviceName}, }, - }, - AddressType: discovery.AddressTypeIPv4, - Endpoints: []discovery.Endpoint{ - { - Conditions: discovery.EndpointConditions{ - Ready: utilpointer.Bool(true), 
+ Ports: []discovery.EndpointPort{ + { + Protocol: &tcp, + Port: &outPort, + }, + }, + AddressType: discovery.AddressTypeIPv4, + Endpoints: []discovery.Endpoint{ + { + Conditions: discovery.EndpointConditions{ + Ready: utilpointer.Bool(true), + }, + Addresses: []string{nodeAEndpoint}, + NodeName: &nodeA, + }, + { + Conditions: discovery.EndpointConditions{ + Ready: utilpointer.Bool(true), + }, + Addresses: []string{nodeBEndpointIP}, + NodeName: &nodeB, }, - Addresses: []string{"10.128.0.2", "10.128.1.2"}, - NodeName: &nodeA, }, }, }, @@ -395,273 +896,678 @@ func TestSyncServices(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: ns}, Spec: v1.ServiceSpec{ Type: v1.ServiceTypeClusterIP, - ClusterIP: "192.168.1.1", - ClusterIPs: []string{"192.168.1.1"}, + ClusterIP: serviceClusterIP, + ClusterIPs: []string{serviceClusterIP}, Selector: map[string]string{"foo": "bar"}, Ports: []v1.ServicePort{{ - Port: 80, + Port: servicePort, Protocol: v1.ProtocolTCP, - TargetPort: intstr.FromInt(3456), + TargetPort: intstr.FromInt32(outPort), NodePort: nodePort, }}, }, }, - initialDb: []libovsdbtest.TestData{ - &nbdb.LoadBalancer{ - UUID: loadBalancerClusterWideTCPServiceName(ns, serviceName), - Name: loadBalancerClusterWideTCPServiceName(ns, serviceName), - Options: servicesOptions(), - Protocol: &nbdb.LoadBalancerProtocolTCP, - Vips: map[string]string{ - "192.168.0.1:6443": "", - }, - ExternalIDs: serviceExternalIDs(namespacedServiceName(ns, serviceName)), + networks: []networkData{ + { + netInfo: &util.DefaultNetInfo{}, + initialDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: loadBalancerClusterWideTCPServiceName(ns, serviceName), + Name: loadBalancerClusterWideTCPServiceName(ns, serviceName), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + "192.168.0.1:6443": "", + }, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(ns, serviceName)), + }, + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + lbGroup(types.ClusterLBGroupName, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + }, + expectedDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: loadBalancerClusterWideTCPServiceName(ns, serviceName), + Name: loadBalancerClusterWideTCPServiceName(ns, serviceName), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + IPAndPort(serviceClusterIP, servicePort): formatEndpoints(outPort, nodeAEndpoint, nodeBEndpointIP), + }, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(ns, serviceName)), + }, + nodeMergedTemplateLoadBalancer(nodePort, serviceName, ns, outPort, nodeAEndpoint, nodeBEndpointIP), + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + lbGroup(types.ClusterLBGroupName, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + lbGroup(types.ClusterSwitchLBGroupName, nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv4Protocol)), + lbGroup(types.ClusterRouterLBGroupName, nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv4Protocol)), + nodeIPTemplate(nodeAInfo), + nodeIPTemplate(nodeBInfo), + }, + dbStateAfterDeleting: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: 
loadBalancerClusterWideTCPServiceName(ns, serviceName), + Name: loadBalancerClusterWideTCPServiceName(ns, serviceName), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + IPAndPort(serviceClusterIP, servicePort): formatEndpoints(outPort, nodeAEndpoint, nodeBEndpointIP), + }, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(ns, serviceName)), + }, + nodeMergedTemplateLoadBalancer(nodePort, serviceName, ns, outPort, nodeAEndpoint, nodeBEndpointIP), + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + lbGroup(types.ClusterLBGroupName, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + lbGroup(types.ClusterSwitchLBGroupName, nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv4Protocol)), + lbGroup(types.ClusterRouterLBGroupName, nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv4Protocol)), + nodeIPTemplate(nodeAInfo), + nodeIPTemplate(nodeBInfo), + }, + }, + { + netInfo: l3UDN, + initialDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN), + Name: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + "192.168.0.1:6443": "", + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l3UDN.GetNetworkName()), + }, + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitchForNetwork(nodeA, initialLsGroups, l3UDN), + nodeLogicalSwitchForNetwork(nodeB, initialLsGroups, l3UDN), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l3UDN), + nodeLogicalRouterForNetwork(nodeB, initialLrGroups, l3UDN), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l3UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN)), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l3UDN), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l3UDN), + }, + expectedDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN), + Name: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + IPAndPort(serviceClusterIP, servicePort): formatEndpoints(outPort, nodeAEndpoint, nodeBEndpointIP), + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l3UDN.GetNetworkName()), + }, + nodeMergedTemplateLoadBalancerForNetwork(nodePort, serviceName, ns, outPort, l3UDN, nodeAEndpoint, nodeBEndpointIP), + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitchForNetwork(nodeA, initialLsGroups, l3UDN), + nodeLogicalSwitchForNetwork(nodeB, initialLsGroups, l3UDN), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l3UDN), + nodeLogicalRouterForNetwork(nodeB, initialLrGroups, l3UDN), + + lbGroup(types.ClusterLBGroupName), + 
lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l3UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN)), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l3UDN, nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l3UDN)), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l3UDN, nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l3UDN)), + + nodeIPTemplate(nodeAInfo), + nodeIPTemplate(nodeBInfo), + }, + dbStateAfterDeleting: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN), + Name: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + IPAndPort(serviceClusterIP, servicePort): formatEndpoints(outPort, nodeAEndpoint, nodeBEndpointIP), + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l3UDN.GetNetworkName()), + }, + nodeMergedTemplateLoadBalancerForNetwork(nodePort, serviceName, ns, outPort, l3UDN, nodeAEndpoint, nodeBEndpointIP), + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitchForNetwork(nodeA, initialLsGroups, l3UDN), + nodeLogicalSwitchForNetwork(nodeB, initialLsGroups, l3UDN), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l3UDN), + nodeLogicalRouterForNetwork(nodeB, initialLrGroups, l3UDN), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l3UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN)), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l3UDN, nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l3UDN)), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l3UDN, nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l3UDN)), + + nodeIPTemplate(nodeAInfo), + nodeIPTemplate(nodeBInfo), + }, + }, + { + netInfo: l2UDN, + initialDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN), + Name: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + "192.168.0.1:6443": "", + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l2UDN.GetNetworkName()), + }, + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitchForNetwork("", initialLsGroups, l2UDN), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l2UDN), + nodeLogicalRouterForNetwork(nodeB, initialLrGroups, l2UDN), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l2UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN)), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l2UDN), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l2UDN), + }, + expectedDb: 
[]libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN), + Name: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + IPAndPort(serviceClusterIP, servicePort): formatEndpoints(outPort, nodeAEndpoint, nodeBEndpointIP), + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l2UDN.GetNetworkName()), + }, + nodeMergedTemplateLoadBalancerForNetwork(nodePort, serviceName, ns, outPort, l2UDN, nodeAEndpoint, nodeBEndpointIP), + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitchForNetwork("", initialLsGroups, l2UDN), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l2UDN), + nodeLogicalRouterForNetwork(nodeB, initialLrGroups, l2UDN), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l2UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN)), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l2UDN, nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l2UDN)), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l2UDN, nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l2UDN)), + + nodeIPTemplate(nodeAInfo), + nodeIPTemplate(nodeBInfo), + }, + dbStateAfterDeleting: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN), + Name: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + IPAndPort(serviceClusterIP, servicePort): formatEndpoints(outPort, nodeAEndpoint, nodeBEndpointIP), + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l2UDN.GetNetworkName()), + }, + nodeMergedTemplateLoadBalancerForNetwork(nodePort, serviceName, ns, outPort, l2UDN, nodeAEndpoint, nodeBEndpointIP), + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitch(nodeB, initialLsGroups), + nodeLogicalSwitchForNetwork("", initialLsGroups, l2UDN), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouter(nodeB, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l2UDN), + nodeLogicalRouterForNetwork(nodeB, initialLrGroups, l2UDN), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l2UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN)), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l2UDN, nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l2UDN)), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l2UDN, nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l2UDN)), + + nodeIPTemplate(nodeAInfo), + nodeIPTemplate(nodeBInfo), + }, }, - nodeLogicalSwitch(nodeA, initialLsGroups), - nodeLogicalSwitch(nodeB, initialLsGroups), - nodeLogicalRouter(nodeA, initialLrGroups), - nodeLogicalRouter(nodeB, initialLrGroups), - lbGroup(types.ClusterLBGroupName, - loadBalancerClusterWideTCPServiceName(ns, 
serviceName)), - lbGroup(types.ClusterSwitchLBGroupName), - lbGroup(types.ClusterRouterLBGroupName), }, - expectedDb: []libovsdbtest.TestData{ - &nbdb.LoadBalancer{ - UUID: loadBalancerClusterWideTCPServiceName(ns, serviceName), - Name: loadBalancerClusterWideTCPServiceName(ns, serviceName), - Options: servicesOptions(), - Protocol: &nbdb.LoadBalancerProtocolTCP, - Vips: map[string]string{ - "192.168.1.1:80": "10.128.0.2:3456,10.128.1.2:3456", - }, - ExternalIDs: serviceExternalIDs(namespacedServiceName(ns, serviceName)), + + nodeToDelete: nodeA, + }, + { + // Test for multiple IP support in Template LBs (https://github.com/ovn-org/ovn-kubernetes/pull/3557) + name: "NodePort service, multiple IP addresses, ETP=cluster", + enableIPv6: true, + nodeAInfo: nodeAInfoMultiIP, + nodeBInfo: nil, + slices: []discovery.EndpointSlice{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName + "ipv4", + Namespace: ns, + Labels: map[string]string{discovery.LabelServiceName: serviceName}, + }, + Ports: []discovery.EndpointPort{{Protocol: &tcp, Port: &outPort}}, + AddressType: discovery.AddressTypeIPv4, + Endpoints: kubetest.MakeReadyEndpointList(nodeA, nodeAEndpoint, nodeAEndpoint2), + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName + "ipv6", + Namespace: ns, + Labels: map[string]string{discovery.LabelServiceName: serviceName}, + }, + Ports: []discovery.EndpointPort{{Protocol: &tcp, Port: &outPort}}, + AddressType: discovery.AddressTypeIPv6, + Endpoints: kubetest.MakeReadyEndpointList(nodeA, nodeAEndpointV6, nodeAEndpoint2V6), + }, + }, + service: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: ns}, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeNodePort, + ClusterIP: serviceClusterIP, + ClusterIPs: []string{serviceClusterIP, serviceClusterIPv6}, + IPFamilies: []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol}, + Selector: map[string]string{"foo": "bar"}, + ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeCluster, + Ports: []v1.ServicePort{{ + Port: servicePort, + Protocol: v1.ProtocolTCP, + TargetPort: intstr.FromInt32(outPort), + NodePort: 30123, + }}, }, - nodeMergedTemplateLoadBalancer(nodePort, serviceName, ns, outport, nodeAEndpointIP, nodeBEndpointIP), - nodeLogicalSwitch(nodeA, initialLsGroups), - nodeLogicalSwitch(nodeB, initialLsGroups), - nodeLogicalRouter(nodeA, initialLrGroups), - nodeLogicalRouter(nodeB, initialLrGroups), - lbGroup(types.ClusterLBGroupName, loadBalancerClusterWideTCPServiceName(ns, serviceName)), - lbGroup(types.ClusterSwitchLBGroupName, nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv4Protocol)), - lbGroup(types.ClusterRouterLBGroupName, nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv4Protocol)), - nodeIPTemplate(firstNode), - nodeIPTemplate(secondNode), }, - nodeToDelete: nodeConfig(nodeA, nodeAHostIP), - dbStateAfterDeleting: []libovsdbtest.TestData{ - &nbdb.LoadBalancer{ - UUID: loadBalancerClusterWideTCPServiceName(ns, serviceName), - Name: loadBalancerClusterWideTCPServiceName(ns, serviceName), - Options: servicesOptions(), - Protocol: &nbdb.LoadBalancerProtocolTCP, - Vips: map[string]string{ - "192.168.1.1:80": "10.128.0.2:3456,10.128.1.2:3456", - }, - ExternalIDs: serviceExternalIDs(namespacedServiceName(ns, serviceName)), + networks: []networkData{ + { + netInfo: &util.DefaultNetInfo{}, + initialDb: []libovsdbtest.TestData{ + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalRouter(nodeA, initialLrGroups), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + 
lbGroup(types.ClusterRouterLBGroupName), + }, + expectedDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: loadBalancerClusterWideTCPServiceName(ns, serviceName), + Name: loadBalancerClusterWideTCPServiceName(ns, serviceName), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + IPAndPort(serviceClusterIP, servicePort): formatEndpoints(outPort, nodeAEndpoint, nodeAEndpoint2), + IPAndPort(serviceClusterIPv6, servicePort): formatEndpoints(outPort, nodeAEndpointV6, nodeAEndpoint2V6), + }, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(ns, serviceName)), + }, + &nbdb.LoadBalancer{ + UUID: nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv4Protocol), + Name: nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv4Protocol), + Options: templateServicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + "^NODEIP_IPv4_1:30123": formatEndpoints(outPort, nodeAEndpoint, nodeAEndpoint2), + "^NODEIP_IPv4_2:30123": formatEndpoints(outPort, nodeAEndpoint, nodeAEndpoint2), + "^NODEIP_IPv4_0:30123": formatEndpoints(outPort, nodeAEndpoint, nodeAEndpoint2), + }, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(ns, serviceName)), + }, + &nbdb.LoadBalancer{ + UUID: nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv6Protocol), + Name: nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv6Protocol), + Options: templateServicesOptionsV6(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + "^NODEIP_IPv6_1:30123": formatEndpoints(outPort, nodeAEndpointV6, nodeAEndpoint2V6), + "^NODEIP_IPv6_0:30123": formatEndpoints(outPort, nodeAEndpointV6, nodeAEndpoint2V6), + }, + ExternalIDs: loadBalancerExternalIDs(namespacedServiceName(ns, serviceName)), + }, + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalRouter(nodeA, initialLrGroups), + lbGroup(types.ClusterLBGroupName, loadBalancerClusterWideTCPServiceName(ns, serviceName)), + lbGroup(types.ClusterSwitchLBGroupName, + nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv4Protocol), + nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv6Protocol)), + lbGroup(types.ClusterRouterLBGroupName, + nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv4Protocol), + nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv6Protocol)), + + &nbdb.ChassisTemplateVar{ + UUID: nodeA, Chassis: nodeA, + Variables: map[string]string{ + makeLBNodeIPTemplateNamePrefix(v1.IPv4Protocol) + "0": nodeAMultiAddressesV4[0], + makeLBNodeIPTemplateNamePrefix(v1.IPv4Protocol) + "1": nodeAMultiAddressesV4[1], + makeLBNodeIPTemplateNamePrefix(v1.IPv4Protocol) + "2": nodeAMultiAddressesV4[2], + + makeLBNodeIPTemplateNamePrefix(v1.IPv6Protocol) + "0": nodeAMultiAddressesV6[0], + makeLBNodeIPTemplateNamePrefix(v1.IPv6Protocol) + "1": nodeAMultiAddressesV6[1], + }, + }, + }, + }, + { + netInfo: l3UDN, + initialDb: []libovsdbtest.TestData{ + nodeLogicalSwitch(nodeA, initialLsGroups), + nodeLogicalSwitchForNetwork(nodeA, initialLsGroups, l3UDN), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l3UDN), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l3UDN), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l3UDN), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l3UDN), + }, + expectedDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: 
clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN), + Name: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + IPAndPort(serviceClusterIP, servicePort): formatEndpoints(outPort, nodeAEndpoint, nodeAEndpoint2), + IPAndPort(serviceClusterIPv6, servicePort): formatEndpoints(outPort, nodeAEndpointV6, nodeAEndpoint2V6), + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l3UDN.GetNetworkName()), + }, + &nbdb.LoadBalancer{ + UUID: nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l3UDN), + Name: nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l3UDN), + Options: templateServicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + "^NODEIP_IPv4_1:30123": formatEndpoints(outPort, nodeAEndpoint, nodeAEndpoint2), + "^NODEIP_IPv4_2:30123": formatEndpoints(outPort, nodeAEndpoint, nodeAEndpoint2), + "^NODEIP_IPv4_0:30123": formatEndpoints(outPort, nodeAEndpoint, nodeAEndpoint2), + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l3UDN.GetNetworkName()), + }, + &nbdb.LoadBalancer{ + UUID: nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv6Protocol, l3UDN), + Name: nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv6Protocol, l3UDN), + Options: templateServicesOptionsV6(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + "^NODEIP_IPv6_1:30123": formatEndpoints(outPort, nodeAEndpointV6, nodeAEndpoint2V6), + "^NODEIP_IPv6_0:30123": formatEndpoints(outPort, nodeAEndpointV6, nodeAEndpoint2V6), + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l3UDN.GetNetworkName()), + }, + + nodeLogicalSwitchForNetwork(nodeAInfo.name, initialLsGroups, l3UDN), + nodeLogicalRouterForNetwork(nodeAInfo.name, initialLrGroups, l3UDN), + + nodeLogicalSwitch(nodeAInfo.name, initialLsGroups), + nodeLogicalRouter(nodeAInfo.name, initialLrGroups), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + + lbGroupForNetwork(types.ClusterLBGroupName, l3UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l3UDN)), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, + l3UDN, + nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l3UDN), + nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv6Protocol, l3UDN)), + lbGroupForNetwork(types.ClusterRouterLBGroupName, + l3UDN, + nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l3UDN), + nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv6Protocol, l3UDN)), + + &nbdb.ChassisTemplateVar{ + UUID: nodeAInfo.chassisID, Chassis: nodeAInfo.chassisID, + Variables: map[string]string{ + makeLBNodeIPTemplateNamePrefix(v1.IPv4Protocol) + "0": nodeAMultiAddressesV4[0], + makeLBNodeIPTemplateNamePrefix(v1.IPv4Protocol) + "1": nodeAMultiAddressesV4[1], + makeLBNodeIPTemplateNamePrefix(v1.IPv4Protocol) + "2": nodeAMultiAddressesV4[2], + + makeLBNodeIPTemplateNamePrefix(v1.IPv6Protocol) + "0": nodeAMultiAddressesV6[0], + makeLBNodeIPTemplateNamePrefix(v1.IPv6Protocol) + "1": nodeAMultiAddressesV6[1], + }, + }, + }, + }, + { + netInfo: l2UDN, + initialDb: []libovsdbtest.TestData{ + nodeLogicalSwitch(nodeA, initialLsGroups), + 
nodeLogicalSwitchForNetwork("", initialLsGroups, l2UDN), + + nodeLogicalRouter(nodeA, initialLrGroups), + nodeLogicalRouterForNetwork(nodeA, initialLrGroups, l2UDN), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + lbGroupForNetwork(types.ClusterLBGroupName, l2UDN), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, l2UDN), + lbGroupForNetwork(types.ClusterRouterLBGroupName, l2UDN), + }, + expectedDb: []libovsdbtest.TestData{ + &nbdb.LoadBalancer{ + UUID: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN), + Name: clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN), + Options: servicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + IPAndPort(serviceClusterIP, servicePort): formatEndpoints(outPort, nodeAEndpoint, nodeAEndpoint2), + IPAndPort(serviceClusterIPv6, servicePort): formatEndpoints(outPort, nodeAEndpointV6, nodeAEndpoint2V6), + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l2UDN.GetNetworkName()), + }, + &nbdb.LoadBalancer{ + UUID: nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l2UDN), + Name: nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l2UDN), + Options: templateServicesOptions(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + "^NODEIP_IPv4_1:30123": formatEndpoints(outPort, nodeAEndpoint, nodeAEndpoint2), + "^NODEIP_IPv4_2:30123": formatEndpoints(outPort, nodeAEndpoint, nodeAEndpoint2), + "^NODEIP_IPv4_0:30123": formatEndpoints(outPort, nodeAEndpoint, nodeAEndpoint2), + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l2UDN.GetNetworkName()), + }, + &nbdb.LoadBalancer{ + UUID: nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv6Protocol, l2UDN), + Name: nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv6Protocol, l2UDN), + Options: templateServicesOptionsV6(), + Protocol: &nbdb.LoadBalancerProtocolTCP, + Vips: map[string]string{ + "^NODEIP_IPv6_1:30123": formatEndpoints(outPort, nodeAEndpointV6, nodeAEndpoint2V6), + "^NODEIP_IPv6_0:30123": formatEndpoints(outPort, nodeAEndpointV6, nodeAEndpoint2V6), + }, + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(ns, serviceName), l2UDN.GetNetworkName()), + }, + + nodeLogicalSwitchForNetwork("", initialLsGroups, l2UDN), + nodeLogicalRouterForNetwork(nodeAInfo.name, initialLrGroups, l2UDN), + + nodeLogicalSwitch(nodeAInfo.name, initialLsGroups), + nodeLogicalRouter(nodeAInfo.name, initialLrGroups), + + lbGroup(types.ClusterLBGroupName), + lbGroup(types.ClusterSwitchLBGroupName), + lbGroup(types.ClusterRouterLBGroupName), + + lbGroupForNetwork(types.ClusterLBGroupName, l2UDN, clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, l2UDN)), + lbGroupForNetwork(types.ClusterSwitchLBGroupName, + l2UDN, + nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l2UDN), + nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv6Protocol, l2UDN)), + lbGroupForNetwork(types.ClusterRouterLBGroupName, + l2UDN, + nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv4Protocol, l2UDN), + nodeMergedTemplateLoadBalancerNameForNetwork(ns, serviceName, v1.IPv6Protocol, l2UDN)), + + &nbdb.ChassisTemplateVar{ + UUID: nodeAInfo.chassisID, Chassis: nodeAInfo.chassisID, + Variables: map[string]string{ + 
makeLBNodeIPTemplateNamePrefix(v1.IPv4Protocol) + "0": nodeAMultiAddressesV4[0], + makeLBNodeIPTemplateNamePrefix(v1.IPv4Protocol) + "1": nodeAMultiAddressesV4[1], + makeLBNodeIPTemplateNamePrefix(v1.IPv4Protocol) + "2": nodeAMultiAddressesV4[2], + + makeLBNodeIPTemplateNamePrefix(v1.IPv6Protocol) + "0": nodeAMultiAddressesV6[0], + makeLBNodeIPTemplateNamePrefix(v1.IPv6Protocol) + "1": nodeAMultiAddressesV6[1], + }, + }, + }, }, - nodeMergedTemplateLoadBalancer(nodePort, serviceName, ns, outport, nodeAEndpointIP, nodeBEndpointIP), - nodeLogicalSwitch(nodeA, initialLsGroups), - nodeLogicalSwitch(nodeB, initialLsGroups), - nodeLogicalRouter(nodeA, initialLrGroups), - nodeLogicalRouter(nodeB, initialLrGroups), - lbGroup(types.ClusterLBGroupName, loadBalancerClusterWideTCPServiceName(ns, serviceName)), - lbGroup(types.ClusterSwitchLBGroupName, nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv4Protocol)), - lbGroup(types.ClusterRouterLBGroupName, nodeMergedTemplateLoadBalancerName(ns, serviceName, v1.IPv4Protocol)), - nodeIPTemplate(firstNode), - nodeIPTemplate(secondNode), }, }, } for i, tt := range tests { - t.Run(fmt.Sprintf("%d_%s", i, tt.name), func(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - if tt.gatewayMode != "" { - globalconfig.Gateway.Mode = globalconfig.GatewayMode(tt.gatewayMode) - } else { - globalconfig.Gateway.Mode = globalconfig.GatewayModeShared - } - - controller, err := newControllerWithDBSetup(libovsdbtest.TestSetup{NBData: tt.initialDb}) - if err != nil { - t.Fatalf("Error creating controller: %v", err) - } - defer controller.close() - // Add objects to the Store - controller.endpointSliceStore.Add(tt.slice) - controller.serviceStore.Add(tt.service) - - controller.nodeTracker.nodes = defaultNodes - controller.RequestFullSync(controller.nodeTracker.getZoneNodes()) + for _, network := range tt.networks { + t.Run(fmt.Sprintf("%d_%s_%s", i, tt.name, network.netInfo.GetNetworkName()), func(t *testing.T) { + + g := gomega.NewGomegaWithT(t) + + // Setup test-dependent parameters (default network vs UDN) + netInfo := network.netInfo + initialDb := network.initialDb + expectedDb := network.expectedDb + dbStateAfterDeleting := network.dbStateAfterDeleting + if !netInfo.IsDefault() { + nodeAInfo.gatewayRouterName = netInfo.GetNetworkScopedGWRouterName(nodeAInfo.gatewayRouterName) + nodeAInfo.switchName = netInfo.GetNetworkScopedSwitchName(nodeAInfo.switchName) + nodeBInfo.gatewayRouterName = netInfo.GetNetworkScopedGWRouterName(nodeBInfo.gatewayRouterName) + nodeBInfo.switchName = netInfo.GetNetworkScopedSwitchName(nodeBInfo.switchName) + + } + + if tt.gatewayMode != "" { + globalconfig.Gateway.Mode = globalconfig.GatewayMode(tt.gatewayMode) + } else { + globalconfig.Gateway.Mode = globalconfig.GatewayModeShared + } + + if tt.enableIPv6 { + globalconfig.IPv6Mode = true + defer func() { globalconfig.IPv6Mode = false }() + } + + // Create services controller + var controller *serviceController + var err error + + controller, err = newControllerWithDBSetupForNetwork(libovsdbtest.TestSetup{NBData: initialDb}, netInfo, ns) + if err != nil { + t.Fatalf("Error creating controller: %v", err) + } + if err := controller.nadController.Start(); err != nil { + t.Fatalf("Error starting NAD controller: %v", err) + } + defer controller.nadController.Stop() + defer controller.close() + + // Add k8s objects + for _, slice := range tt.slices { + controller.endpointSliceStore.Add(&slice) + } + controller.serviceStore.Add(tt.service) + + // Setup node tracker + 
controller.nodeTracker.nodes = map[string]nodeInfo{} + if tt.nodeAInfo != nil { + controller.nodeTracker.nodes[nodeA] = *tt.nodeAInfo + } + if tt.nodeBInfo != nil { + controller.nodeTracker.nodes[nodeB] = *tt.nodeBInfo + } + + // Add mirrored endpoint slices when the controller runs on a UDN + if !netInfo.IsDefault() { + for _, slice := range tt.slices { + controller.endpointSliceStore.Add(kubetest.MirrorEndpointSlice(&slice, netInfo.GetNetworkName(), true)) + } + } + + // Trigger services controller + controller.RequestFullSync(controller.nodeTracker.getZoneNodes()) + + err = controller.syncService(namespacedServiceName(ns, serviceName)) + if err != nil { + t.Fatalf("syncServices error: %v", err) + } + + // Check OVN DB + g.Expect(controller.nbClient).To(libovsdbtest.HaveData(expectedDb)) + + // If the test requires a node to be deleted, remove it from the node tracker, + // sync the service controller and check the OVN DB + if tt.nodeToDelete != "" { + controller.nodeTracker.removeNode(tt.nodeToDelete) + + g.Expect(controller.syncService(namespacedServiceName(ns, serviceName))).To(gomega.Succeed()) + + g.Expect(controller.nbClient).To(libovsdbtest.HaveData(dbStateAfterDeleting)) + } + }) + } - err = controller.syncService(ns + "/" + serviceName) - if err != nil { - t.Fatalf("syncServices error: %v", err) - } - - g.Expect(controller.nbClient).To(libovsdbtest.HaveData(tt.expectedDb)) - - if tt.nodeToDelete != nil { - controller.nodeTracker.removeNode(tt.nodeToDelete.name) - g.Expect(controller.syncService(namespacedServiceName(ns, serviceName))).To(gomega.Succeed()) - g.Expect(controller.nbClient).To(libovsdbtest.HaveData(tt.dbStateAfterDeleting)) - } - }) } } -func Test_ETPCluster_NodePort_Service_WithMultipleIPAddresses(t *testing.T) { - g := gomega.NewGomegaWithT(t) - globalconfig.IPv4Mode = true - globalconfig.IPv6Mode = true - _, cidr4, _ := net.ParseCIDR("10.128.0.0/16") - _, cidr6, _ := net.ParseCIDR("fe00:0:0:0:5555::0/64") - globalconfig.Default.ClusterSubnets = []globalconfig.CIDRNetworkEntry{{CIDR: cidr4, HostSubnetLength: 16}, {CIDR: cidr6, HostSubnetLength: 64}} - - nodeName := "node-a" - nodeIPv4 := []net.IP{net.ParseIP("10.1.1.1"), net.ParseIP("10.2.2.2"), net.ParseIP("10.3.3.3")} - nodeIPv6 := []net.IP{net.ParseIP("fd00:0:0:0:1::1"), net.ParseIP("fd00:0:0:0:2::2")} - - nodeA := nodeInfo{ - name: nodeName, - l3gatewayAddresses: []net.IP{nodeIPv4[0], nodeIPv6[0]}, - hostAddresses: append(nodeIPv4, nodeIPv6...), - gatewayRouterName: nodeGWRouterName(nodeName), - switchName: nodeSwitchName(nodeName), - chassisID: nodeName, - zone: types.OvnDefaultZone, - } +func nodeLogicalSwitch(nodeName string, lbGroups []string, namespacedServiceNames ...string) *nbdb.LogicalSwitch { + return nodeLogicalSwitchForNetwork(nodeName, lbGroups, &util.DefaultNetInfo{}, namespacedServiceNames...) 
+} - svc := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: "svc-foo", Namespace: "namespace1"}, - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeNodePort, - ClusterIP: "192.168.1.1", - ClusterIPs: []string{"192.168.1.1", "fd00:0:0:0:7777::1"}, - IPFamilies: []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol}, - Selector: map[string]string{"foo": "bar"}, - ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeCluster, - Ports: []v1.ServicePort{{ - Port: 80, - Protocol: v1.ProtocolTCP, - TargetPort: intstr.FromInt(3456), - NodePort: 30123, - }}, - }, - } +func nodeLogicalSwitchForNetwork(nodeName string, lbGroups []string, netInfo util.NetInfo, namespacedServiceNames ...string) *nbdb.LogicalSwitch { + var externalIDs map[string]string + lbGroupsForNetwork := lbGroups - endPointSliceV4 := &discovery.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: svc.Name + "ipv4", - Namespace: svc.Namespace, - Labels: map[string]string{discovery.LabelServiceName: svc.Name}, - }, - Ports: []discovery.EndpointPort{{Protocol: &tcp, Port: &outport}}, - AddressType: discovery.AddressTypeIPv4, - Endpoints: kube_test.MakeReadyEndpointList(nodeName, "10.128.0.2", "10.128.1.2"), - } + switchName := nodeSwitchNameForNetwork(nodeName, netInfo) - endPointSliceV6 := &discovery.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: svc.Name + "ipv6", - Namespace: svc.Namespace, - Labels: map[string]string{discovery.LabelServiceName: svc.Name}, - }, - Ports: []discovery.EndpointPort{{Protocol: &tcp, Port: &outport}}, - AddressType: discovery.AddressTypeIPv6, - Endpoints: kube_test.MakeReadyEndpointList(nodeName, "fe00:0:0:0:5555::2", "fe00:0:0:0:5555::3"), - } - - controller, err := newControllerWithDBSetup(libovsdbtest.TestSetup{NBData: []libovsdbtest.TestData{ - nodeLogicalSwitch(nodeA.name, initialLsGroups), - nodeLogicalRouter(nodeA.name, initialLrGroups), - - lbGroup(types.ClusterLBGroupName), - lbGroup(types.ClusterSwitchLBGroupName), - lbGroup(types.ClusterRouterLBGroupName), - }}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - defer controller.close() - - controller.endpointSliceStore.Add(endPointSliceV4) - controller.endpointSliceStore.Add(endPointSliceV6) - controller.serviceStore.Add(svc) - controller.nodeTracker.nodes = map[string]nodeInfo{nodeA.name: nodeA} - - controller.RequestFullSync(controller.nodeTracker.getZoneNodes()) - err = controller.syncService(svc.Namespace + "/" + svc.Name) - g.Expect(err).ToNot(gomega.HaveOccurred()) - - expectedDb := []libovsdbtest.TestData{ - &nbdb.LoadBalancer{ - UUID: loadBalancerClusterWideTCPServiceName(svc.Namespace, svc.Name), - Name: loadBalancerClusterWideTCPServiceName(svc.Namespace, svc.Name), - Options: servicesOptions(), - Protocol: &nbdb.LoadBalancerProtocolTCP, - Vips: map[string]string{ - "192.168.1.1:80": "10.128.0.2:3456,10.128.1.2:3456", - "[fd00::7777:0:0:1]:80": "[fe00::5555:0:0:2]:3456,[fe00::5555:0:0:3]:3456", - }, - ExternalIDs: serviceExternalIDs(namespacedServiceName(svc.Namespace, svc.Name)), - }, - &nbdb.LoadBalancer{ - UUID: "Service_namespace1/svc-foo_TCP_node_switch_template_IPv4_merged", - Name: "Service_namespace1/svc-foo_TCP_node_switch_template_IPv4_merged", - Options: templateServicesOptions(), - Protocol: &nbdb.LoadBalancerProtocolTCP, - Vips: map[string]string{ - "^NODEIP_IPv4_1:30123": "10.128.0.2:3456,10.128.1.2:3456", - "^NODEIP_IPv4_2:30123": "10.128.0.2:3456,10.128.1.2:3456", - "^NODEIP_IPv4_0:30123": "10.128.0.2:3456,10.128.1.2:3456", - }, - ExternalIDs: serviceExternalIDs(namespacedServiceName(svc.Namespace, svc.Name)), - 
}, - &nbdb.LoadBalancer{ - UUID: "Service_namespace1/svc-foo_TCP_node_switch_template_IPv6_merged", - Name: "Service_namespace1/svc-foo_TCP_node_switch_template_IPv6_merged", - Options: templateServicesOptionsV6(), - Protocol: &nbdb.LoadBalancerProtocolTCP, - Vips: map[string]string{ - "^NODEIP_IPv6_1:30123": "[fe00::5555:0:0:2]:3456,[fe00::5555:0:0:3]:3456", - "^NODEIP_IPv6_0:30123": "[fe00::5555:0:0:2]:3456,[fe00::5555:0:0:3]:3456", - }, - ExternalIDs: serviceExternalIDs(namespacedServiceName(svc.Namespace, svc.Name)), - }, - nodeLogicalSwitch(nodeA.name, initialLsGroups), - nodeLogicalRouter(nodeA.name, initialLrGroups), - lbGroup(types.ClusterLBGroupName, loadBalancerClusterWideTCPServiceName(svc.Namespace, svc.Name)), - lbGroup(types.ClusterSwitchLBGroupName, - "Service_namespace1/svc-foo_TCP_node_switch_template_IPv4_merged", - "Service_namespace1/svc-foo_TCP_node_switch_template_IPv6_merged"), - lbGroup(types.ClusterRouterLBGroupName, - "Service_namespace1/svc-foo_TCP_node_switch_template_IPv4_merged", - "Service_namespace1/svc-foo_TCP_node_switch_template_IPv6_merged"), - - &nbdb.ChassisTemplateVar{ - UUID: nodeA.chassisID, Chassis: nodeA.chassisID, - Variables: map[string]string{ - makeLBNodeIPTemplateNamePrefix(v1.IPv4Protocol) + "0": nodeIPv4[0].String(), - makeLBNodeIPTemplateNamePrefix(v1.IPv4Protocol) + "1": nodeIPv4[1].String(), - makeLBNodeIPTemplateNamePrefix(v1.IPv4Protocol) + "2": nodeIPv4[2].String(), - - makeLBNodeIPTemplateNamePrefix(v1.IPv6Protocol) + "0": nodeIPv6[0].String(), - makeLBNodeIPTemplateNamePrefix(v1.IPv6Protocol) + "1": nodeIPv6[1].String(), - }, - }, + if netInfo.IsPrimaryNetwork() { + for _, lbGroup := range lbGroups { + lbGroupsForNetwork = append(lbGroupsForNetwork, netInfo.GetNetworkScopedLoadBalancerGroupName(lbGroup)) + } + externalIDs = getExternalIDsForNetwork(netInfo.GetNetworkName()) } - - g.Expect(controller.nbClient).To(libovsdbtest.HaveData(expectedDb)) - -} - -func nodeLogicalSwitch(nodeName string, lbGroups []string, namespacedServiceNames ...string) *nbdb.LogicalSwitch { ls := &nbdb.LogicalSwitch{ - UUID: nodeSwitchName(nodeName), - Name: nodeSwitchName(nodeName), - LoadBalancerGroup: lbGroups, + UUID: switchName, + Name: switchName, + LoadBalancerGroup: lbGroupsForNetwork, + ExternalIDs: externalIDs, } + if len(namespacedServiceNames) > 0 { ls.LoadBalancer = namespacedServiceNames } @@ -669,29 +1575,61 @@ func nodeLogicalSwitch(nodeName string, lbGroups []string, namespacedServiceName } func nodeLogicalRouter(nodeName string, lbGroups []string, namespacedServiceNames ...string) *nbdb.LogicalRouter { + return nodeLogicalRouterForNetwork(nodeName, lbGroups, &util.DefaultNetInfo{}, namespacedServiceNames...) 
+}
+
+func nodeLogicalRouterForNetwork(nodeName string, lbGroups []string, netInfo util.NetInfo, namespacedServiceNames ...string) *nbdb.LogicalRouter {
+	var externalIDs map[string]string
+	lbGroupsForNetwork := lbGroups
+
+	routerName := nodeGWRouterNameForNetwork(nodeName, netInfo)
+
+	if netInfo.IsPrimaryNetwork() {
+		for _, lbGroup := range lbGroups {
+			lbGroupsForNetwork = append(lbGroupsForNetwork, netInfo.GetNetworkScopedLoadBalancerGroupName(lbGroup))
+		}
+		externalIDs = getExternalIDsForNetwork(netInfo.GetNetworkName())
+	}
+
 	lr := &nbdb.LogicalRouter{
-		UUID:              nodeGWRouterName(nodeName),
-		Name:              nodeGWRouterName(nodeName),
+		UUID:              routerName,
+		Name:              routerName,
+		LoadBalancerGroup: lbGroupsForNetwork,
+		ExternalIDs:       externalIDs,
 	}
+
 	if len(namespacedServiceNames) > 0 {
 		lr.LoadBalancer = namespacedServiceNames
 	}
+
 	return lr
 }

 func nodeSwitchName(nodeName string) string {
-	return fmt.Sprintf("switch-%s", nodeName)
+	return nodeSwitchNameForNetwork(nodeName, &util.DefaultNetInfo{})
+}
+
+func nodeSwitchNameForNetwork(nodeName string, netInfo util.NetInfo) string {
+	return netInfo.GetNetworkScopedSwitchName(fmt.Sprintf("switch-%s", nodeName))
 }

 func nodeGWRouterName(nodeName string) string {
-	return fmt.Sprintf("gr-%s", nodeName)
+	return nodeGWRouterNameForNetwork(nodeName, &util.DefaultNetInfo{})
+}
+
+func nodeGWRouterNameForNetwork(nodeName string, netInfo util.NetInfo) string {
+	return netInfo.GetNetworkScopedGWRouterName(fmt.Sprintf("gr-%s", nodeName))
 }

 func lbGroup(name string, namespacedServiceNames ...string) *nbdb.LoadBalancerGroup {
+	return lbGroupForNetwork(name, &util.DefaultNetInfo{}, namespacedServiceNames...)
+}
+
+func lbGroupForNetwork(name string, netInfo util.NetInfo, namespacedServiceNames ...string) *nbdb.LoadBalancerGroup {
+	LBGroupName := netInfo.GetNetworkScopedLoadBalancerGroupName(name)
 	lbg := &nbdb.LoadBalancerGroup{
-		UUID: name,
-		Name: name,
+		UUID: LBGroupName,
+		Name: LBGroupName,
 	}
 	if len(namespacedServiceNames) > 0 {
 		lbg.LoadBalancer = namespacedServiceNames
@@ -707,36 +1645,53 @@ func namespacedServiceName(ns string, name string) string {
 	return fmt.Sprintf("%s/%s", ns, name)
 }

-func nodeSwitchRouterLoadBalancerName(nodeName string, serviceNamespace string, serviceName string) string {
-	return fmt.Sprintf(
+func clusterWideTCPServiceLoadBalancerName(ns string, serviceName string) string {
+	return clusterWideTCPServiceLoadBalancerNameForNetwork(ns, serviceName, &util.DefaultNetInfo{})
+}
+
+func clusterWideTCPServiceLoadBalancerNameForNetwork(ns string, serviceName string, netInfo util.NetInfo) string {
+	baseName := fmt.Sprintf("Service_%s_TCP_cluster", namespacedServiceName(ns, serviceName))
+	return netInfo.GetNetworkScopedLoadBalancerName(baseName)
+}
+
+func nodeSwitchRouterLoadBalancerNameForNetwork(nodeName string, serviceNamespace string, serviceName string, netInfo util.NetInfo) string {
+	baseName := fmt.Sprintf(
 		"Service_%s/%s_TCP_node_router+switch_%s",
 		serviceNamespace, serviceName, nodeName)
+	return netInfo.GetNetworkScopedLoadBalancerName(baseName)
 }

-func nodeSwitchTemplateLoadBalancerName(serviceNamespace string, serviceName string, addressFamily v1.IPFamily) string {
-	return fmt.Sprintf(
+func nodeSwitchTemplateLoadBalancerNameForNetwork(serviceNamespace string, serviceName string, addressFamily v1.IPFamily, netInfo util.NetInfo) string {
+	baseName := fmt.Sprintf(
 		"Service_%s/%s_TCP_node_switch_template_%s",
 		serviceNamespace, serviceName, addressFamily)
+	return netInfo.GetNetworkScopedLoadBalancerName(baseName)
 }

-func
nodeRouterTemplateLoadBalancerName(serviceNamespace string, serviceName string, addressFamily v1.IPFamily) string { - return fmt.Sprintf( +func nodeRouterTemplateLoadBalancerNameForNetwork(serviceNamespace string, serviceName string, addressFamily v1.IPFamily, netInfo util.NetInfo) string { + baseName := fmt.Sprintf( "Service_%s/%s_TCP_node_router_template_%s", serviceNamespace, serviceName, addressFamily) + return netInfo.GetNetworkScopedLoadBalancerName(baseName) } func nodeMergedTemplateLoadBalancerName(serviceNamespace string, serviceName string, addressFamily v1.IPFamily) string { - return fmt.Sprintf( + return nodeMergedTemplateLoadBalancerNameForNetwork(serviceNamespace, serviceName, addressFamily, &util.DefaultNetInfo{}) +} + +func nodeMergedTemplateLoadBalancerNameForNetwork(serviceNamespace string, serviceName string, addressFamily v1.IPFamily, netInfo util.NetInfo) string { + baseName := fmt.Sprintf( "Service_%s/%s_TCP_node_switch_template_%s_merged", serviceNamespace, serviceName, addressFamily) + return netInfo.GetNetworkScopedLoadBalancerName(baseName) } func servicesOptions() map[string]string { @@ -777,39 +1732,29 @@ func tcpGatewayRouterExternalIDs() map[string]string { } } -func serviceExternalIDs(namespacedServiceName string) map[string]string { +func getExternalIDsForNetwork(network string) map[string]string { + if network == types.DefaultNetworkName { + return map[string]string{} + } + return map[string]string{ - types.LoadBalancerKindExternalID: "Service", - types.LoadBalancerOwnerExternalID: namespacedServiceName, + types.NetworkRoleExternalID: types.NetworkRolePrimary, + types.NetworkExternalID: network, } } -func nodeSwitchTemplateLoadBalancer(nodePort int32, serviceName string, serviceNamespace string) *nbdb.LoadBalancer { - nodeTemplateIP := makeTemplate(makeLBNodeIPTemplateNamePrefix(v1.IPv4Protocol) + "0") - return &nbdb.LoadBalancer{ - UUID: nodeSwitchTemplateLoadBalancerName(serviceNamespace, serviceName, v1.IPv4Protocol), - Name: nodeSwitchTemplateLoadBalancerName(serviceNamespace, serviceName, v1.IPv4Protocol), - Options: templateServicesOptions(), - Protocol: &nbdb.LoadBalancerProtocolTCP, - Vips: map[string]string{ - endpoint(refTemplate(nodeTemplateIP.Name), nodePort): refTemplate(makeTarget(serviceName, serviceNamespace, "TCP", nodePort, "node_switch_template", v1.IPv4Protocol)), - }, - ExternalIDs: serviceExternalIDs(namespacedServiceName(serviceNamespace, serviceName)), - } +func loadBalancerExternalIDs(namespacedServiceName string) map[string]string { + return loadBalancerExternalIDsForNetwork(namespacedServiceName, types.DefaultNetworkName) } -func nodeRouterTemplateLoadBalancer(nodePort int32, serviceName string, serviceNamespace string) *nbdb.LoadBalancer { - nodeTemplateIP := makeTemplate(makeLBNodeIPTemplateNamePrefix(v1.IPv4Protocol) + "0") - return &nbdb.LoadBalancer{ - UUID: nodeRouterTemplateLoadBalancerName(serviceNamespace, serviceName, v1.IPv4Protocol), - Name: nodeRouterTemplateLoadBalancerName(serviceNamespace, serviceName, v1.IPv4Protocol), - Options: templateServicesOptions(), - Protocol: &nbdb.LoadBalancerProtocolTCP, - Vips: map[string]string{ - endpoint(refTemplate(nodeTemplateIP.Name), nodePort): refTemplate(makeTarget(serviceName, serviceNamespace, "TCP", nodePort, "node_router_template", v1.IPv4Protocol)), - }, - ExternalIDs: serviceExternalIDs(namespacedServiceName(serviceNamespace, serviceName)), +func loadBalancerExternalIDsForNetwork(namespacedServiceName string, network string) map[string]string { + externalIDs := 
map[string]string{ + types.LoadBalancerKindExternalID: "Service", + types.LoadBalancerOwnerExternalID: namespacedServiceName, } + maps.Copy(externalIDs, getExternalIDsForNetwork(network)) + return externalIDs + } func nodeIPTemplate(node *nodeInfo) *nbdb.ChassisTemplateVar { @@ -823,16 +1768,20 @@ func nodeIPTemplate(node *nodeInfo) *nbdb.ChassisTemplateVar { } func nodeMergedTemplateLoadBalancer(nodePort int32, serviceName string, serviceNamespace string, outputPort int32, endpointIPs ...string) *nbdb.LoadBalancer { + return nodeMergedTemplateLoadBalancerForNetwork(nodePort, serviceName, serviceNamespace, outputPort, &util.DefaultNetInfo{}, endpointIPs...) +} + +func nodeMergedTemplateLoadBalancerForNetwork(nodePort int32, serviceName string, serviceNamespace string, outputPort int32, netInfo util.NetInfo, endpointIPs ...string) *nbdb.LoadBalancer { nodeTemplateIP := makeTemplate(makeLBNodeIPTemplateNamePrefix(v1.IPv4Protocol) + "0") return &nbdb.LoadBalancer{ - UUID: nodeMergedTemplateLoadBalancerName(serviceNamespace, serviceName, v1.IPv4Protocol), - Name: nodeMergedTemplateLoadBalancerName(serviceNamespace, serviceName, v1.IPv4Protocol), + UUID: nodeMergedTemplateLoadBalancerNameForNetwork(serviceNamespace, serviceName, v1.IPv4Protocol, netInfo), + Name: nodeMergedTemplateLoadBalancerNameForNetwork(serviceNamespace, serviceName, v1.IPv4Protocol, netInfo), Options: templateServicesOptions(), Protocol: &nbdb.LoadBalancerProtocolTCP, Vips: map[string]string{ - endpoint(refTemplate(nodeTemplateIP.Name), nodePort): computeEndpoints(outputPort, endpointIPs...), + IPAndPort(refTemplate(nodeTemplateIP.Name), nodePort): formatEndpoints(outputPort, endpointIPs...), }, - ExternalIDs: serviceExternalIDs(namespacedServiceName(serviceNamespace, serviceName)), + ExternalIDs: loadBalancerExternalIDsForNetwork(namespacedServiceName(serviceNamespace, serviceName), netInfo.GetNetworkName()), } } @@ -840,30 +1789,44 @@ func refTemplate(template string) string { return "^" + template } -func makeTarget(serviceName, serviceNamespace string, proto v1.Protocol, outputPort int32, scope string, addressFamily v1.IPFamily) string { - return makeTemplateName( - fmt.Sprintf("Service_%s/%s_%s_%d_%s_%s", - serviceNamespace, serviceName, - proto, outputPort, scope, addressFamily)) -} - -func computeEndpoints(outputPort int32, ips ...string) string { +func formatEndpoints(outputPort int32, ips ...string) string { var endpoints []string for _, ip := range ips { - endpoints = append(endpoints, endpoint(ip, outputPort)) + endpoints = append(endpoints, IPAndPort(ip, outputPort)) } return strings.Join(endpoints, ",") } -func endpoint(ip string, port int32) string { - return fmt.Sprintf("%s:%d", ip, port) +func IPAndPort(ip string, port int32) string { + ipStr := ip + if utilnet.IsIPv6String(ip) { + ipStr = "[" + ip + "]" + } + + return fmt.Sprintf("%s:%d", ipStr, port) } -func nodeConfig(nodeName string, nodeIP string) *nodeInfo { +func getNodeInfo(nodeName string, nodeIPsV4 []string, nodeIPsV6 []string) *nodeInfo { + var gwAddresses []net.IP + ips := []net.IP{} + + if len(nodeIPsV4) > 0 { + gwAddresses = append(gwAddresses, net.ParseIP(nodeIPsV4[0])) + for _, ip := range nodeIPsV4 { + ips = append(ips, net.ParseIP(ip)) + } + } + if len(nodeIPsV6) > 0 { + gwAddresses = append(gwAddresses, net.ParseIP(nodeIPsV6[0])) + for _, ip := range nodeIPsV6 { + ips = append(ips, net.ParseIP(ip)) + } + } + return &nodeInfo{ name: nodeName, - l3gatewayAddresses: []net.IP{net.ParseIP(nodeIP)}, - hostAddresses: 
[]net.IP{net.ParseIP(nodeIP)}, + l3gatewayAddresses: gwAddresses, + hostAddresses: ips, gatewayRouterName: nodeGWRouterName(nodeName), switchName: nodeSwitchName(nodeName), chassisID: nodeName, @@ -911,12 +1874,3 @@ func deleteTestNBGlobal(nbClient libovsdbclient.Client, zone string) error { return nil } - -func readyEndpointsWithAddresses(addresses ...string) discovery.Endpoint { - return discovery.Endpoint{ - Conditions: discovery.EndpointConditions{ - Ready: utilpointer.Bool(true), - }, - Addresses: addresses, - } -} diff --git a/go-controller/pkg/ovn/controller/services/svc_template_var.go b/go-controller/pkg/ovn/controller/services/svc_template_var.go index 8a0c1fafcf..2309efe139 100644 --- a/go-controller/pkg/ovn/controller/services/svc_template_var.go +++ b/go-controller/pkg/ovn/controller/services/svc_template_var.go @@ -10,6 +10,7 @@ import ( libovsdb "github.com/ovn-org/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" corev1 "k8s.io/api/core/v1" ) @@ -188,10 +189,10 @@ func isLBNodeIPTemplateName(name string) bool { // makeLBTargetTemplateName builds a load balancer target template name. func makeLBTargetTemplateName(service *corev1.Service, proto corev1.Protocol, port int32, - family corev1.IPFamily, scope string) string { + family corev1.IPFamily, scope string, netInfo util.NetInfo) string { return makeTemplateName( - makeLBName(service, proto, - fmt.Sprintf("%d_%s_%v", port, scope, family))) + makeLBNameForNetwork(service, proto, + fmt.Sprintf("%d_%s_%v", port, scope, family), netInfo)) } // getTemplatesFromRulesTargets returns the map of template variables referred diff --git a/go-controller/pkg/ovn/controller/services/utils.go b/go-controller/pkg/ovn/controller/services/utils.go index b158ba59cf..de057fd7e1 100644 --- a/go-controller/pkg/ovn/controller/services/utils.go +++ b/go-controller/pkg/ovn/controller/services/utils.go @@ -4,6 +4,11 @@ import ( "net" globalconfig "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + + v1 "k8s.io/api/core/v1" + ktypes "k8s.io/apimachinery/pkg/types" ) // hasHostEndpoints determines if a slice of endpoints contains a host networked pod @@ -25,3 +30,21 @@ func IsHostEndpoint(endpointIP string) bool { } return true } + +func getExternalIDsForLoadBalancer(service *v1.Service, netInfo util.NetInfo) map[string]string { + nsn := ktypes.NamespacedName{Namespace: service.Namespace, Name: service.Name} + + externalIDs := map[string]string{ + types.LoadBalancerOwnerExternalID: nsn.String(), + types.LoadBalancerKindExternalID: "Service", + } + + if netInfo.IsDefault() { + return externalIDs + } + + externalIDs[types.NetworkExternalID] = netInfo.GetNetworkName() + externalIDs[types.NetworkRoleExternalID] = util.GetUserDefinedNetworkRole(netInfo.IsPrimaryNetwork()) + + return externalIDs +} diff --git a/go-controller/pkg/ovn/controller/services/utils_test.go b/go-controller/pkg/ovn/controller/services/utils_test.go new file mode 100644 index 0000000000..4e49bdcbd1 --- /dev/null +++ b/go-controller/pkg/ovn/controller/services/utils_test.go @@ -0,0 +1,76 @@ +package services + +import ( + "testing" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + discovery "k8s.io/api/discovery/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestExternalIDsForLoadBalancer(t *testing.T) { + name := "svc-ab23" + namespace := "ns" + defaultNetInfo := util.DefaultNetInfo{} + config.IPv4Mode = true + UDNNetInfo, err := getSampleUDNNetInfo(namespace, "layer3") + assert.Equal(t, err, nil) + assert.Equal(t, + map[string]string{ + types.LoadBalancerKindExternalID: "Service", + types.LoadBalancerOwnerExternalID: "ns/svc-ab23", + }, + getExternalIDsForLoadBalancer(&v1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{discovery.LabelServiceName: "svc"}, + }, + }, &defaultNetInfo), + ) + + assert.Equal(t, + map[string]string{ + types.LoadBalancerKindExternalID: "Service", + types.LoadBalancerOwnerExternalID: "ns/svc-ab23", + }, + getExternalIDsForLoadBalancer(&v1.Service{ + // also handle no TypeMeta, which can happen. + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{discovery.LabelServiceName: "svc"}, + }, + }, &defaultNetInfo), + ) + + assert.Equal(t, + map[string]string{ + types.LoadBalancerKindExternalID: "Service", + types.LoadBalancerOwnerExternalID: "ns/svc-ab23", + types.NetworkExternalID: UDNNetInfo.GetNetworkName(), + types.NetworkRoleExternalID: types.NetworkRolePrimary, + }, + getExternalIDsForLoadBalancer(&v1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{discovery.LabelServiceName: "svc"}, + }, + }, UDNNetInfo), + ) + +} diff --git a/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc.go b/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc.go new file mode 100644 index 0000000000..a3b06e7647 --- /dev/null +++ b/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc.go @@ -0,0 +1,299 @@ +package udnenabledsvc + +import ( + "fmt" + "slices" + "sync" + "time" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + ktypes "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + coreinformers "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +const ( + addressSetName = "udn-enabled-svc-cluster-ips" + controllerName = "udn-enabled-svc" +) + +// Controller watches services as defined with binary argument flag --udn-allowed-default-services. It gathers all the clusterIPs +// from the services defined and updates an address_set with the total set. This address set maybe consumed. It will never be deleted even +// if UDN is disabled because consumers may still reference it for a period of time. 
+type Controller struct {
+	// libovsdb northbound client interface
+	nbClient libovsdbclient.Client
+	// addressSetFactory creates/ensures the address set managed by this controller
+	addressSetFactory addressset.AddressSetFactory
+	addressSetMu      *sync.Mutex
+	addressSet        addressset.AddressSet // stores the selected services' clusterIPs
+	serviceInformer   coreinformers.ServiceInformer
+	queue             workqueue.TypedRateLimitingInterface[string]
+	// Exposes UDN enabled service VIPs. Key is the namespaced name of a service; value is its list of clusterIPs.
+	cache map[string][]string
+	// services is the desired set of namespaced service names whose clusterIPs the controller watches and consumes.
+	// If a service isn't found or doesn't contain at least one clusterIP, it's not added to the cache.
+	// services is set once and must be treated as read-only for the lifetime of the controller.
+	services sets.Set[string]
+}
+
+// NewController creates a new controller to sync UDN enabled services' clusterIPs with an OVN address set. The address
+// set is never deleted and may be referenced read-only by consumers. A single worker processes events.
+func NewController(nbClient libovsdbclient.Client, addressSetFactory addressset.AddressSetFactory,
+	serviceInformer coreinformers.ServiceInformer, services []string) *Controller {
+
+	serviceSet := sets.New[string]()
+	serviceSet.Insert(services...)
+
+	return &Controller{
+		nbClient:          nbClient,
+		addressSetFactory: addressSetFactory,
+		addressSetMu:      &sync.Mutex{},
+		serviceInformer:   serviceInformer,
+		queue: workqueue.NewTypedRateLimitingQueueWithConfig(
+			workqueue.NewTypedItemFastSlowRateLimiter[string](time.Second, 5*time.Second, 5),
+			workqueue.TypedRateLimitingQueueConfig[string]{Name: "udnenabledservice"},
+		),
+		cache:    make(map[string][]string),
+		services: serviceSet,
+	}
+}
+
+// Run adds event handlers and starts a single worker to copy all UDN enabled services' VIPs to an OVN address set and
+// expose that address set to consumers. It blocks until the stop channel is closed.
+func (c *Controller) Run(stopCh <-chan struct{}) error {
+	defer c.queue.ShutDown()
+	// ensure the service informer is synced
+	klog.Info("Waiting for service informer to sync")
+	if ok := cache.WaitForCacheSync(stopCh, c.serviceInformer.Informer().HasSynced); !ok {
+		return nil
+	}
+	handler, err := c.serviceInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{
+		AddFunc:    c.onServiceAdd,
+		UpdateFunc: c.onServiceUpdate,
+		DeleteFunc: c.onServiceDelete,
+	}))
+	if err != nil {
+		return fmt.Errorf("failed to add event handler: %v", err)
+	}
+	if err = c.ensureAddressSet(); err != nil {
+		return fmt.Errorf("failed to ensure UDN enabled services address set: %v", err)
+	}
+	klog.Info("Performing full resync")
+	if err = c.fullResync(); err != nil {
+		return fmt.Errorf("failed to run UDN enabled services controller because the initial resync failed: %v", err)
+	}
+	klog.Info("Waiting for handler to sync")
+	if ok := cache.WaitForCacheSync(stopCh, handler.HasSynced); !ok {
+		return nil
+	}
+	defer klog.Info("UDN enabled services controller ended")
+	klog.Info("Starting worker")
+	go wait.Until(c.worker, time.Second, stopCh)
+	<-stopCh
+	return nil
+}
+
+// GetAddressSetDBIDs returns the DB IDs of the address set managed by this controller - an address set for each IP family enabled.
+func GetAddressSetDBIDs() *libovsdbops.DbObjectIDs {
+	return libovsdbops.NewDbObjectIDs(libovsdbops.AddressSetUDNEnabledService, controllerName, map[libovsdbops.ExternalIDKey]string{
+		libovsdbops.ObjectNameKey: addressSetName,
+	})
+}
+
+func (c *Controller) IsAddressSetAvailable() bool {
+	c.addressSetMu.Lock()
+	defer c.addressSetMu.Unlock()
+	return c.addressSet != nil
+}
+
+// getAddressSetHashNames returns the v4 and v6 address set hash names. The address set persists once created and is never removed.
+func (c *Controller) getAddressSetHashNames() (string, string) {
+	c.addressSetMu.Lock()
+	defer c.addressSetMu.Unlock()
+	if c.addressSet == nil {
+		panic("Run() must be called before attempting to get address set hash names")
+	}
+	return c.addressSet.GetASHashNames()
+}
+
+func (c *Controller) ensureAddressSet() error {
+	var err error
+	c.addressSetMu.Lock()
+	c.addressSet, err = c.addressSetFactory.EnsureAddressSet(GetAddressSetDBIDs())
+	c.addressSetMu.Unlock()
+	return err
+}
+
+func (c *Controller) addToAddressSet(addresses ...string) error {
+	c.addressSetMu.Lock()
+	defer c.addressSetMu.Unlock()
+	return c.addressSet.AddAddresses(addresses)
+}
+
+func (c *Controller) deleteFromAddressSet(addresses ...string) error {
+	c.addressSetMu.Lock()
+	defer c.addressSetMu.Unlock()
+	return c.addressSet.DeleteAddresses(addresses)
+}
+
+func (c *Controller) fullResync() error {
+	services, err := c.serviceInformer.Lister().List(labels.Everything())
+	if err != nil {
+		return fmt.Errorf("failed to list all services: %v", err)
+	}
+	var allClusterIPs []string
+	for _, service := range services {
+		namespacedName := ktypes.NamespacedName{Namespace: service.Namespace, Name: service.Name}.String()
+		if !util.IsUDNEnabledService(namespacedName) {
+			continue
+		}
+		if len(service.Spec.ClusterIPs) == 0 {
+			continue
+		}
+		allClusterIPs = append(allClusterIPs, service.Spec.ClusterIPs...)
+		// size the destination before copying; copying into a zero-length slice would copy nothing
+		clusterIPs := make([]string, len(service.Spec.ClusterIPs))
+		copy(clusterIPs, service.Spec.ClusterIPs)
+		c.cache[namespacedName] = clusterIPs
+	}
+	return c.addressSet.SetAddresses(allClusterIPs)
+}
+
+func (c *Controller) worker() {
+	for c.processNextWorkItem() {
+	}
+}
+
+func (c *Controller) processNextWorkItem() bool {
+	key, done := c.queue.Get()
+	if done {
+		return false
+	}
+	defer c.queue.Done(key)
+	err := c.syncService(key)
+	c.handleErr(err, key)
+	return true
+}
+
+func (c *Controller) handleErr(err error, key interface{}) {
+	keyStr := key.(string)
+	if err == nil {
+		c.queue.Forget(keyStr)
+		return
+	}
+	if c.queue.NumRequeues(keyStr) < 15 {
+		klog.V(2).Infof("Error syncing UDN enabled service %s, retrying: %v", keyStr, err)
+		// requeue using the fast/slow rate limiter configured in NewController:
+		// 1s for the first 5 attempts on a key, 5s thereafter; give up after 15 attempts
+		c.queue.AddRateLimited(keyStr)
+		return
+	}
+	klog.Warningf("Dropping UDN enabled service %s out of the queue: %v", keyStr, err)
+	c.queue.Forget(keyStr)
+	utilruntime.HandleError(err)
+}
+
+func (c *Controller) syncService(key string) error {
+	// we don't support cleaning up the address set if the UDN enabled services configuration changes at runtime;
+	// fullResync handles a complete (re)sync during startup.
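The `syncService` body that follows reconciles a service's cached clusterIPs against its current spec by computing two element-wise differences with `slices.Contains`. The same pattern in miniature (`diffIPs` is not part of the patch; the O(n·m) linear scan is fine for the handful of clusterIPs a service can carry):

```go
package example

import "slices"

// diffIPs mirrors the reconciliation step below: anything in want but not in
// have must be added to the address set, anything in have but not in want
// must be removed.
func diffIPs(have, want []string) (toAdd, toDel []string) {
	for _, ip := range want {
		if !slices.Contains(have, ip) {
			toAdd = append(toAdd, ip)
		}
	}
	for _, ip := range have {
		if !slices.Contains(want, ip) {
			toDel = append(toDel, ip)
		}
	}
	return toAdd, toDel
}
```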
+	namespace, name, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		return fmt.Errorf("failed to split metadata for service from key %q: %v", key, err)
+	}
+	service, err := c.serviceInformer.Lister().Services(namespace).Get(name)
+	if err != nil && !apierrors.IsNotFound(err) {
+		return fmt.Errorf("failed to get service %s/%s: %v", namespace, name, err)
+	}
+	// cachedClusterIPs is nil when the service was never cached; the slice helpers below handle that
+	cachedClusterIPs, ok := c.cache[key]
+	// service delete case; check before touching service.Spec, since service is nil when it is gone
+	if service == nil {
+		if !ok {
+			return nil
+		}
+		if err = c.deleteFromAddressSet(cachedClusterIPs...); err != nil {
+			return fmt.Errorf("failed to delete clusterIP(s) from address set for service %s: %v", key, err)
+		}
+		delete(c.cache, key)
+		return nil
+	}
+	// service add or update case
+	// cached IPs should be in the same order as specified on .spec.clusterIPs
+	if slices.Equal(service.Spec.ClusterIPs, cachedClusterIPs) {
+		return nil
+	}
+	// delete IPs which are present in the cache but not in the service spec clusterIPs
+	ipsToDel := make([]string, 0)
+	for _, cachedIP := range cachedClusterIPs {
+		if slices.Contains(service.Spec.ClusterIPs, cachedIP) {
+			continue
+		}
+		ipsToDel = append(ipsToDel, cachedIP)
+	}
+	if err = c.deleteFromAddressSet(ipsToDel...); err != nil {
+		return fmt.Errorf("failed to delete stale clusterIP(s) from address set for service %s: %v", key, err)
+	}
+	// ensure any new IPs are added
+	ipsToAdd := make([]string, 0)
+	for _, wantedIP := range service.Spec.ClusterIPs {
+		if slices.Contains(cachedClusterIPs, wantedIP) {
+			continue
+		}
+		ipsToAdd = append(ipsToAdd, wantedIP)
+	}
+	if err = c.addToAddressSet(ipsToAdd...); err != nil {
+		return fmt.Errorf("failed to add clusterIP(s) to address set for service %s: %v", key, err)
+	}
+	cachedClusterIPs = make([]string, 0, len(service.Spec.ClusterIPs))
+	cachedClusterIPs = append(cachedClusterIPs, service.Spec.ClusterIPs...)
+	c.cache[key] = cachedClusterIPs
+	return nil
+}
+
+func (c *Controller) onServiceAdd(obj interface{}) {
+	key, err := cache.MetaNamespaceKeyFunc(obj)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("service add event: couldn't get key for object %+v: %v", obj, err))
+		return
+	}
+	if !c.services.Has(key) {
+		return
+	}
+	c.queue.AddRateLimited(key)
+}
+
+func (c *Controller) onServiceUpdate(_, newObj interface{}) {
+	key, err := cache.MetaNamespaceKeyFunc(newObj)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("service update event: couldn't get key for object %+v: %v", newObj, err))
+		return
+	}
+	if !c.services.Has(key) {
+		return
+	}
+	c.queue.AddRateLimited(key)
+}
+
+func (c *Controller) onServiceDelete(obj interface{}) {
+	key, err := cache.MetaNamespaceKeyFunc(obj)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("service delete event: couldn't get key for object %+v: %v", obj, err))
+		return
+	}
+	if !c.services.Has(key) {
+		return
+	}
+	c.queue.AddRateLimited(key)
+}
diff --git a/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc_test.go b/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc_test.go
new file mode 100644
index 0000000000..3000053b4a
--- /dev/null
+++ b/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc_test.go
@@ -0,0 +1,272 @@
+package udnenabledsvc
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/onsi/gomega"
+	"github.com/ovn-org/libovsdb/client"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory"
+	libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+	addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set"
+	libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+func TestUDNEnabledServices(t *testing.T) {
+
+	const (
+		service1Namespace  = "default"
+		service1Name       = "kubernetes"
+		service1NsName     = service1Namespace + "/" + service1Name
+		service2Namespace  = "dns"
+		service2Name       = "dns"
+		service2NsName     = service2Namespace + "/" + service2Name
+		service1ClusterIP1 = "10.96.0.1"
+		service1ClusterIP2 = "10.96.0.20"
+		service2ClusterIP1 = "10.96.1.5"
+	)
+
+	tests := []struct {
+		name               string            // test description
+		initialServices    []runtime.Object  // initial services populated in the k8s API before the controller runs
+		initialClusterIPs  []string          // initial cluster IPs added to the address set before the controller runs
+		afterRunServices   []*corev1.Service // services added after the controller is running
+		expectedClusterIPs sets.Set[string]  // clusterIPs expected to be consistently present in the address set shortly after Run is invoked
+		udnEnabledServices []string
+	}{
+		{
+			name:               "no services are specified or selected",
+			expectedClusterIPs: sets.New[string](),
+		},
+		{
+			name: "add services before runtime",
+			initialServices: []runtime.Object{
+				getService(service1Namespace, service1Name, service1ClusterIP1, service1ClusterIP2),
+				getService(service2Namespace, service2Name, service2ClusterIP1),
+			},
+			expectedClusterIPs: sets.New[string](service1ClusterIP1, service1ClusterIP2),
+			udnEnabledServices: []string{service1NsName},
+		},
+		{
+			name: "add services
before and after runtime", + initialServices: []runtime.Object{getService(service1Namespace, service1Name, service1ClusterIP1, service1ClusterIP2)}, + afterRunServices: []*corev1.Service{getService(service2Namespace, service2Name, service2ClusterIP1)}, + expectedClusterIPs: sets.New[string](service1ClusterIP1, service1ClusterIP2, service2ClusterIP1), + udnEnabledServices: []string{service1NsName, service2NsName}, + }, + { + name: "add services at runtime", + initialServices: []runtime.Object{}, + afterRunServices: []*corev1.Service{getService(service2Namespace, service2Name, service2ClusterIP1)}, + expectedClusterIPs: sets.New[string](service2ClusterIP1), + udnEnabledServices: []string{service1NsName, service2NsName}, + }, + { + name: "update service at runtime", + initialServices: []runtime.Object{getService(service1Namespace, service1Name, service1ClusterIP1)}, + afterRunServices: []*corev1.Service{getService(service1Namespace, service1Name, service1ClusterIP1, service1ClusterIP2)}, + expectedClusterIPs: sets.New[string](service1ClusterIP1, service1ClusterIP2), + udnEnabledServices: []string{service1NsName, service2NsName}, + }, + { + name: "cleans up stale entries", + initialClusterIPs: []string{service2ClusterIP1}, // service doesn't exist for this VIP + initialServices: []runtime.Object{getService(service1Namespace, service1Name, service1ClusterIP1, service1ClusterIP2)}, + expectedClusterIPs: sets.New[string](service1ClusterIP1, service1ClusterIP2), + udnEnabledServices: []string{service1NsName, service2NsName}, + }, + { + name: "removes clusterIP", + initialServices: []runtime.Object{getService(service1Namespace, service1Name, service1ClusterIP1)}, + afterRunServices: []*corev1.Service{getService(service1Namespace, service1Name)}, + expectedClusterIPs: sets.New[string](), + udnEnabledServices: []string{service1NsName, service2NsName}, + }, + } + + for i, tt := range tests { + t.Run(fmt.Sprintf("%d: %s", i, tt.name), func(t *testing.T) { + g := gomega.NewGomegaWithT(t) + // setup fake NB DB + var initialDB []libovsdbtest.TestData + if len(tt.initialClusterIPs) > 0 { + v4AS, v6AS, err := getAddressSets(tt.initialClusterIPs) + if err != nil { + t.Fatalf("failed to create NB DB address sets: %v", err) + } + initialDB = []libovsdbtest.TestData{ + v4AS, + v6AS, + } + } + nbClient, cleanup, err := libovsdbtest.NewNBTestHarness(libovsdbtest.TestSetup{NBData: initialDB}, nil) + if err != nil { + t.Fatalf("failed to create new NB test harness: %v", err) + } + defer cleanup.Cleanup() + asf := addressset.NewOvnAddressSetFactory(nbClient, true, true) + t.Logf("adding services to kapi before controller is executed") + ovnClient := util.GetOVNClientset(tt.initialServices...).GetOVNKubeControllerClientset() + factoryMock, err := factory.NewOVNKubeControllerWatchFactory(ovnClient) + if err != nil { + t.Fatalf("failed to create new OVN kube controller watch factory: %v", err) + } + if err = factoryMock.Start(); err != nil { + t.Fatalf("failed to start watch factory: %v", err) + } + c := NewController(nbClient, asf, factoryMock.ServiceCoreInformer(), tt.udnEnabledServices) + stopCh := make(chan struct{}) + wg := &sync.WaitGroup{} + // start running the controller + t.Logf("starting the controller") + wg.Add(1) + go func() { + if err := c.Run(stopCh); err != nil { + t.Logf("Run() controller failed: %v", err) + } + wg.Done() + }() + defer func() { + close(stopCh) + wg.Wait() + }() + + // block until address set is created + g.Eventually(c.IsAddressSetAvailable).Should(gomega.BeTrue()) + // create the 
services post run. If service already exists, update it.
+			t.Logf("add services to kapi at runtime")
+			if err = createOrUpdateServices(ovnClient, tt.afterRunServices); err != nil {
+				t.Fatalf("failed to create or update Kubernetes services: %v", err)
+			}
+			t.Logf("ensure address set has the correct set of clusterIPs")
+			// ensure expected clusterIPs are present within the OVN address set
+			asName1, asName2 := c.getAddressSetHashNames()
+			g.Eventually(func() error {
+				clusterIPs, err := getAllAddressesFromAddressSets(nbClient, asName1, asName2)
+				if err != nil {
+					t.Fatalf("failed to get all address set addresses: %v", err)
+				}
+				if !tt.expectedClusterIPs.Equal(clusterIPs) {
+					return fmt.Errorf("expected: %v, actual: %v, diff: %v", tt.expectedClusterIPs.UnsortedList(),
+						clusterIPs.UnsortedList(), tt.expectedClusterIPs.Difference(clusterIPs))
+				}
+				return nil
+			}).WithTimeout(10 * time.Second).Should(gomega.Succeed())
+			t.Logf("delete all services and ensure the address set is empty")
+			for ns, name := range map[string]string{service1Namespace: service1Name, service2Namespace: service2Name} {
+				err = ovnClient.KubeClient.CoreV1().Services(ns).Delete(context.Background(), name, metav1.DeleteOptions{})
+				if err != nil && !errors.IsNotFound(err) {
+					t.Fatalf("failed to ensure service %s/%s is deleted: %v", ns, name, err)
+				}
+			}
+			t.Logf("ensure address set is empty")
+			g.Eventually(func() int {
+				clusterIPs, err := getAllAddressesFromAddressSets(nbClient, asName1, asName2)
+				if err != nil {
+					t.Fatalf("failed to get all address set addresses: %v", err)
+				}
+				return clusterIPs.Len()
+			}).WithTimeout(10 * time.Second).Should(gomega.BeZero())
+		})
+	}
+}
+
+func getAllAddressesFromAddressSets(nbClient client.Client, asNames ...string) (sets.Set[string], error) {
+	addressSets, err := libovsdbops.FindAddressSetsWithPredicate(nbClient, func(set *nbdb.AddressSet) bool {
+		for _, asName := range asNames {
+			if asName == set.Name {
+				return true
+			}
+		}
+		return false
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to get address sets from NB DB: %v", err)
+	}
+	// collect all addresses found across the matching address sets; callers compare them against expectations
+	asClusterIPs := sets.New[string]()
+	for _, addressSet := range addressSets {
+		asClusterIPs.Insert(addressSet.Addresses...)
+	}
+	return asClusterIPs, nil
+}
+
+func createOrUpdateServices(client *util.OVNKubeControllerClientset, services []*corev1.Service) error {
+	// create the services post run; if a service already exists, update it.
+ for _, service := range services { + _, err := client.KubeClient.CoreV1().Services(service.Namespace).Get(context.Background(), service.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + _, err = client.KubeClient.CoreV1().Services(service.Namespace).Create(context.Background(), service, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create service %s/%s: %v", service.Namespace, service.Name, err) + } + } else { + return fmt.Errorf("failed to check if service is already created: %v", err) + } + } else { + _, err = client.KubeClient.CoreV1().Services(service.Namespace).Update(context.Background(), service, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to update service %s/%s: %v", service.Namespace, service.Name, err) + } + } + } + return nil +} + +func getService(namespace, name string, clusterIPs ...string) *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, + Spec: corev1.ServiceSpec{ + ClusterIPs: clusterIPs, + }, + } +} + +func getAddressSets(addresses []string) (*nbdb.AddressSet, *nbdb.AddressSet, error) { + v4Addresses, err := util.MatchAllIPStringFamily(false, addresses) + if err != nil { + if err != util.ErrorNoIP { + return nil, nil, err + } + v4Addresses = make([]string, 0) + } + v6Addresses, err := util.MatchAllIPStringFamily(true, addresses) + if err != nil { + if err != util.ErrorNoIP { + return nil, nil, err + } + v6Addresses = make([]string, 0) + } + + v4DBIDs := GetAddressSetDBIDs() + v4DBIDs = v4DBIDs.AddIDs(map[libovsdbops.ExternalIDKey]string{libovsdbops.IPFamilyKey: "v4"}) + + v4AS := &nbdb.AddressSet{ + UUID: v4DBIDs.String(), + Addresses: v4Addresses, + ExternalIDs: v4DBIDs.GetExternalIDs(), + Name: v4DBIDs.String(), + } + + v6DBIDs := GetAddressSetDBIDs() + v6DBIDs = v6DBIDs.AddIDs(map[libovsdbops.ExternalIDKey]string{libovsdbops.IPFamilyKey: "v6"}) + + v6AS := &nbdb.AddressSet{ + UUID: v6DBIDs.String(), + Addresses: v6Addresses, + ExternalIDs: v6DBIDs.GetExternalIDs(), + Name: v6DBIDs.String(), + } + return v4AS, v6AS, nil +} diff --git a/go-controller/pkg/ovn/controller/unidling/unidle_test.go b/go-controller/pkg/ovn/controller/unidling/unidle_test.go index 4328b6666d..3b415c0f63 100644 --- a/go-controller/pkg/ovn/controller/unidling/unidle_test.go +++ b/go-controller/pkg/ovn/controller/unidling/unidle_test.go @@ -18,7 +18,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) diff --git a/go-controller/pkg/ovn/default_network_controller.go b/go-controller/pkg/ovn/default_network_controller.go index a3d48d1a4c..e61ec29c2c 100644 --- a/go-controller/pkg/ovn/default_network_controller.go +++ b/go-controller/pkg/ovn/default_network_controller.go @@ -13,9 +13,9 @@ import ( egressipv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1" egressqoslisters "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/listers/egressqos/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" - libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + nad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" anpcontroller "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/admin_network_policy" apbroutecontroller "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/apbroute" @@ -25,6 +25,8 @@ import ( dnsnameresolver "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/dns_name_resolver" aclsyncer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/external_ids_syncer/acl" addrsetsyncer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/external_ids_syncer/address_set" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/external_ids_syncer/nat" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/external_ids_syncer/port_group" lsm "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/logical_switch_manager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/topology" @@ -64,16 +66,16 @@ type DefaultNetworkController struct { // EgressQoS egressQoSLister egressqoslisters.EgressQoSLister egressQoSSynced cache.InformerSynced - egressQoSQueue workqueue.RateLimitingInterface + egressQoSQueue workqueue.TypedRateLimitingInterface[string] egressQoSCache sync.Map egressQoSPodLister corev1listers.PodLister egressQoSPodSynced cache.InformerSynced - egressQoSPodQueue workqueue.RateLimitingInterface + egressQoSPodQueue workqueue.TypedRateLimitingInterface[string] egressQoSNodeLister corev1listers.NodeLister egressQoSNodeSynced cache.InformerSynced - egressQoSNodeQueue workqueue.RateLimitingInterface + egressQoSNodeQueue workqueue.TypedRateLimitingInterface[string] // Cluster wide Load_Balancer_Group UUID. // Includes all node switches and node gateway routers. 
@@ -90,11 +92,12 @@ type DefaultNetworkController struct { // Cluster-wide router default Control Plane Protection (COPP) UUID defaultCOPPUUID string + // Controller in charge of services + svcController *svccontroller.Controller + // Controller used for programming OVN for egress IP - eIPC egressIPZoneController + eIPC *EgressIPController - // Controller used to handle services - svcController *svccontroller.Controller // Controller used to handle egress services egressSvcController *egresssvc.Controller // Controller used for programming OVN for Admin Network Policy @@ -142,15 +145,17 @@ type DefaultNetworkController struct { // NewDefaultNetworkController creates a new OVN controller for creating logical network // infrastructure and policy for default l3 network -func NewDefaultNetworkController(cnci *CommonNetworkControllerInfo) (*DefaultNetworkController, error) { +func NewDefaultNetworkController(cnci *CommonNetworkControllerInfo, nadController *nad.NetAttachDefinitionController, + observManager *observability.Manager, portCache *PortCache, eIPController *EgressIPController) (*DefaultNetworkController, error) { stopChan := make(chan struct{}) wg := &sync.WaitGroup{} - return newDefaultNetworkControllerCommon(cnci, stopChan, wg, nil) + return newDefaultNetworkControllerCommon(cnci, stopChan, wg, nil, nadController, observManager, portCache, eIPController) } func newDefaultNetworkControllerCommon(cnci *CommonNetworkControllerInfo, defaultStopChan chan struct{}, defaultWg *sync.WaitGroup, - addressSetFactory addressset.AddressSetFactory) (*DefaultNetworkController, error) { + addressSetFactory addressset.AddressSetFactory, nadController *nad.NetAttachDefinitionController, + observManager *observability.Manager, portCache *PortCache, eIPController *EgressIPController) (*DefaultNetworkController, error) { if addressSetFactory == nil { addressSetFactory = addressset.NewOvnAddressSetFactory(cnci.nbClient, config.IPv4Mode, config.IPv6Mode) @@ -161,7 +166,9 @@ func newDefaultNetworkControllerCommon(cnci *CommonNetworkControllerInfo, cnci.watchFactory.ServiceCoreInformer(), cnci.watchFactory.EndpointSliceCoreInformer(), cnci.watchFactory.NodeCoreInformer(), + nadController, cnci.recorder, + &util.DefaultNetInfo{}, ) if err != nil { return nil, fmt.Errorf("unable to create new service controller while creating new default network controller: %w", err) @@ -188,14 +195,13 @@ func newDefaultNetworkControllerCommon(cnci *CommonNetworkControllerInfo, if err != nil { return nil, fmt.Errorf("unable to create new admin policy based external route controller while creating new default network controller :%w", err) } - oc := &DefaultNetworkController{ BaseNetworkController: BaseNetworkController{ CommonNetworkControllerInfo: *cnci, controllerName: DefaultNetworkControllerName, NetInfo: &util.DefaultNetInfo{}, lsManager: lsm.NewLogicalSwitchManager(), - logicalPortCache: newPortCache(defaultStopChan), + logicalPortCache: portCache, namespaces: make(map[string]*namespaceInfo), namespacesMutex: sync.Mutex{}, addressSetFactory: addressSetFactory, @@ -207,25 +213,16 @@ func newDefaultNetworkControllerCommon(cnci *CommonNetworkControllerInfo, localZoneNodes: &sync.Map{}, zoneICHandler: zoneICHandler, cancelableCtx: util.NewCancelableContext(), + observManager: observManager, + nadController: nadController, }, - externalGatewayRouteInfo: apbExternalRouteController.ExternalGWRouteInfoCache, - eIPC: egressIPZoneController{ - NetInfo: &util.DefaultNetInfo{}, - nodeUpdateMutex: &sync.Mutex{}, - podAssignmentMutex: 
&sync.Mutex{}, - podAssignment: make(map[string]*podAssignmentState), - nbClient: cnci.nbClient, - watchFactory: cnci.watchFactory, - nodeZoneState: syncmap.NewSyncMap[bool](), - }, - loadbalancerClusterCache: make(map[kapi.Protocol]string), - clusterLoadBalancerGroupUUID: "", - switchLoadBalancerGroupUUID: "", - routerLoadBalancerGroupUUID: "", - svcController: svcController, - zoneChassisHandler: zoneChassisHandler, - apbExternalRouteController: apbExternalRouteController, - gatewayTopologyFactory: topology.NewGatewayTopologyFactory(cnci.nbClient), + externalGatewayRouteInfo: apbExternalRouteController.ExternalGWRouteInfoCache, + eIPC: eIPController, + loadbalancerClusterCache: make(map[kapi.Protocol]string), + zoneChassisHandler: zoneChassisHandler, + apbExternalRouteController: apbExternalRouteController, + svcController: svcController, + gatewayTopologyFactory: topology.NewGatewayTopologyFactory(cnci.nbClient), } // Allocate IPs for logical router port "GwRouterToJoinSwitchPrefix + OVNClusterRouter". This should always // allocate the first IPs in the join switch subnets. @@ -319,6 +316,17 @@ func (oc *DefaultNetworkController) syncDb() error { if err != nil { return fmt.Errorf("cleaning up stale pod selector address sets for network %v failed : %w", oc.GetNetworkName(), err) } + // The LRP syncer must only be run once; because the default network controller always runs, it performs the LRP updates. + lrpSyncer := logical_router_policy.NewLRPSyncer(oc.nbClient, oc.controllerName) + if err = lrpSyncer.Sync(); err != nil { + return fmt.Errorf("failed to sync logical router policies: %v", err) + } + + // NAT syncer must only be run once. It performs OVN NAT updates. + natSyncer := nat.NewNATSyncer(oc.nbClient, oc.controllerName) + if err = natSyncer.Sync(); err != nil { + return fmt.Errorf("failed to sync NATs: %v", err) + } return nil } @@ -373,44 +381,22 @@ func (oc *DefaultNetworkController) Init(ctx context.Context) error { if _, _, err := util.RunOVNNbctl("--columns=_uuid", "list", "Load_Balancer_Group"); err != nil { klog.Warningf("Load Balancer Group support enabled, however version of OVN in use does not support Load Balancer Groups.") } else { - loadBalancerGroup := nbdb.LoadBalancerGroup{ - Name: ovntypes.ClusterLBGroupName, - } - err := libovsdbops.CreateOrUpdateLoadBalancerGroup(oc.nbClient, &loadBalancerGroup) + clusterLBGroupUUID, switchLBGroupUUID, routerLBGroupUUID, err := initLoadBalancerGroups(oc.nbClient, oc.NetInfo) if err != nil { - klog.Errorf("Error creating cluster-wide load balancer group %s: %v", ovntypes.ClusterLBGroupName, err) return err } - oc.clusterLoadBalancerGroupUUID = loadBalancerGroup.UUID - - loadBalancerGroup = nbdb.LoadBalancerGroup{ - Name: ovntypes.ClusterSwitchLBGroupName, - } - err = libovsdbops.CreateOrUpdateLoadBalancerGroup(oc.nbClient, &loadBalancerGroup) - if err != nil { - klog.Errorf("Error creating cluster-wide switch load balancer group %s: %v", ovntypes.ClusterSwitchLBGroupName, err) - return err - } - oc.switchLoadBalancerGroupUUID = loadBalancerGroup.UUID - - loadBalancerGroup = nbdb.LoadBalancerGroup{ - Name: ovntypes.ClusterRouterLBGroupName, - } - err = libovsdbops.CreateOrUpdateLoadBalancerGroup(oc.nbClient, &loadBalancerGroup) - if err != nil { - klog.Errorf("Error creating cluster-wide router load balancer group %s: %v", ovntypes.ClusterRouterLBGroupName, err) - return err - } - oc.routerLoadBalancerGroupUUID = loadBalancerGroup.UUID + oc.clusterLoadBalancerGroupUUID = clusterLBGroupUUID + oc.switchLoadBalancerGroupUUID = switchLBGroupUUID
+ oc.routerLoadBalancerGroupUUID = routerLBGroupUUID } - networkID := util.InvalidNetworkID + networkID := util.InvalidID nodeNames := []string{} for _, node := range existingNodes { node := *node nodeNames = append(nodeNames, node.Name) - if config.OVNKubernetesFeature.EnableInterconnect && networkID == util.InvalidNetworkID { + if config.OVNKubernetesFeature.EnableInterconnect && networkID == util.InvalidID { // get networkID from any node in the cluster networkID, _ = util.ParseNetworkIDAnnotation(&node, oc.zoneICHandler.GetNetworkName()) } @@ -745,18 +731,6 @@ func (h *defaultNetworkControllerEventHandler) AddResource(obj interface{}, from } return h.oc.ensurePod(nil, pod, true) - case factory.PolicyType: - np, ok := obj.(*knet.NetworkPolicy) - if !ok { - return fmt.Errorf("could not cast %T object to *knet.NetworkPolicy", obj) - } - - if err = h.oc.addNetworkPolicy(np); err != nil { - klog.Infof("Network Policy add failed for %s/%s, will try again later: %v", - np.Namespace, np.Name, err) - return err - } - case factory.NodeType: node, ok := obj.(*kapi.Node) if !ok { @@ -821,15 +795,15 @@ func (h *defaultNetworkControllerEventHandler) AddResource(obj interface{}, from case factory.EgressIPType: eIP := obj.(*egressipv1.EgressIP) - return h.oc.reconcileEgressIP(nil, eIP) + return h.oc.eIPC.reconcileEgressIP(nil, eIP) case factory.EgressIPNamespaceType: namespace := obj.(*kapi.Namespace) - return h.oc.reconcileEgressIPNamespace(nil, namespace) + return h.oc.eIPC.reconcileEgressIPNamespace(nil, namespace) case factory.EgressIPPodType: pod := obj.(*kapi.Pod) - return h.oc.reconcileEgressIPPod(nil, pod) + return h.oc.eIPC.reconcileEgressIPPod(nil, pod) case factory.EgressNodeType: node := obj.(*kapi.Node) @@ -841,18 +815,18 @@ func (h *defaultNetworkControllerEventHandler) AddResource(obj interface{}, from // add the 103 qos rule to new node's switch // NOTE: We don't need to remove this on node delete since entire node switch will get cleaned up if h.oc.isLocalZoneNode(node) { - if err := h.oc.ensureDefaultNoRerouteQoSRules(node.Name); err != nil { + if err := h.oc.eIPC.ensureDefaultNoRerouteQoSRules(node.Name); err != nil { return err } } // add the nodeIP to the default LRP (102 priority) destination address-set - err := h.oc.ensureDefaultNoRerouteNodePolicies() + err := h.oc.eIPC.ensureDefaultNoRerouteNodePolicies() if err != nil { return err } // Add routing specific to Egress IP NOTE: GARP configuration that // Egress IP depends on is added from the gateway reconciliation logic - return h.oc.addEgressNode(node) + return h.oc.eIPC.addEgressNode(node) case factory.NamespaceType: ns, ok := obj.(*kapi.Namespace) @@ -862,10 +836,8 @@ func (h *defaultNetworkControllerEventHandler) AddResource(obj interface{}, from return h.oc.AddNamespace(ns) default: - return fmt.Errorf("no add function for object type %s", h.objType) + return h.oc.AddResourceCommon(h.objType, obj) } - - return nil } // UpdateResource updates the specified object in the cluster to its version in newObj according to its @@ -923,8 +895,8 @@ func (h *defaultNetworkControllerEventHandler) UpdateResource(oldObj, newObj int // | | | | // |--------------------+-------------------+-------------------------------------------------+ newNodeIsLocalZoneNode := h.oc.isLocalZoneNode(newNode) - zoneClusterChanged := h.oc.nodeZoneClusterChanged(oldNode, newNode, newNodeIsLocalZoneNode) - nodeSubnetChanged := nodeSubnetChanged(oldNode, newNode) + zoneClusterChanged := h.oc.nodeZoneClusterChanged(oldNode, newNode, newNodeIsLocalZoneNode, 
types.DefaultNetworkName) + nodeSubnetChanged := nodeSubnetChanged(oldNode, newNode, types.DefaultNetworkName) var aggregatedErrors []error if newNodeIsLocalZoneNode { var nodeSyncsParam *nodeSyncs @@ -988,17 +960,17 @@ func (h *defaultNetworkControllerEventHandler) UpdateResource(oldObj, newObj int case factory.EgressIPType: oldEIP := oldObj.(*egressipv1.EgressIP) newEIP := newObj.(*egressipv1.EgressIP) - return h.oc.reconcileEgressIP(oldEIP, newEIP) + return h.oc.eIPC.reconcileEgressIP(oldEIP, newEIP) case factory.EgressIPNamespaceType: oldNamespace := oldObj.(*kapi.Namespace) newNamespace := newObj.(*kapi.Namespace) - return h.oc.reconcileEgressIPNamespace(oldNamespace, newNamespace) + return h.oc.eIPC.reconcileEgressIPNamespace(oldNamespace, newNamespace) case factory.EgressIPPodType: oldPod := oldObj.(*kapi.Pod) newPod := newObj.(*kapi.Pod) - return h.oc.reconcileEgressIPPod(oldPod, newPod) + return h.oc.eIPC.reconcileEgressIPPod(oldPod, newPod) case factory.EgressNodeType: oldNode := oldObj.(*kapi.Node) @@ -1014,19 +986,19 @@ func (h *defaultNetworkControllerEventHandler) UpdateResource(oldObj, newObj int // and removes the add event from retry cache, we'd need to ensure the qos rule exists // NOTE: We don't need to remove this on node delete since entire node switch will get cleaned up if h.oc.isLocalZoneNode(newNode) { - if err := h.oc.ensureDefaultNoRerouteQoSRules(newNode.Name); err != nil { + if err := h.oc.eIPC.ensureDefaultNoRerouteQoSRules(newNode.Name); err != nil { return err } } // update the nodeIP in the default-reRoute (102 priority) destination address-set if util.NodeHostCIDRsAnnotationChanged(oldNode, newNode) { klog.Infof("Egress IP detected IP address change for node %s. Updating no re-route policies", newNode.Name) - err := h.oc.ensureDefaultNoRerouteNodePolicies() + err := h.oc.eIPC.ensureDefaultNoRerouteNodePolicies() if err != nil { return err } } - return nil + return h.oc.eIPC.addEgressNode(newNode) case factory.NamespaceType: oldNs, newNs := oldObj.(*kapi.Namespace), newObj.(*kapi.Namespace) @@ -1049,13 +1021,6 @@ func (h *defaultNetworkControllerEventHandler) DeleteResource(obj, cachedObj int } return h.oc.removePod(pod, portInfo) - case factory.PolicyType: - knp, ok := obj.(*knet.NetworkPolicy) - if !ok { - return fmt.Errorf("could not cast obj of type %T to *knet.NetworkPolicy", obj) - } - return h.oc.deleteNetworkPolicy(knp) - case factory.NodeType: node, ok := obj.(*kapi.Node) if !ok { @@ -1074,20 +1039,20 @@ func (h *defaultNetworkControllerEventHandler) DeleteResource(obj, cachedObj int case factory.EgressIPType: eIP := obj.(*egressipv1.EgressIP) - return h.oc.reconcileEgressIP(eIP, nil) + return h.oc.eIPC.reconcileEgressIP(eIP, nil) case factory.EgressIPNamespaceType: namespace := obj.(*kapi.Namespace) - return h.oc.reconcileEgressIPNamespace(namespace, nil) + return h.oc.eIPC.reconcileEgressIPNamespace(namespace, nil) case factory.EgressIPPodType: pod := obj.(*kapi.Pod) - return h.oc.reconcileEgressIPPod(pod, nil) + return h.oc.eIPC.reconcileEgressIPPod(pod, nil) case factory.EgressNodeType: node := obj.(*kapi.Node) // remove the IPs from the destination address-set of the default LRP (102) - err := h.oc.ensureDefaultNoRerouteNodePolicies() + err := h.oc.eIPC.ensureDefaultNoRerouteNodePolicies() if err != nil { return err } @@ -1102,7 +1067,7 @@ func (h *defaultNetworkControllerEventHandler) DeleteResource(obj, cachedObj int return h.oc.deleteNamespace(ns) default: - return fmt.Errorf("object type %s not supported", h.objType) + return
h.oc.DeleteResourceCommon(h.objType, obj) } } @@ -1127,10 +1092,10 @@ func (h *defaultNetworkControllerEventHandler) SyncFunc(objs []interface{}) erro syncFunc = h.oc.syncEgressFirewall case factory.EgressIPNamespaceType: - syncFunc = h.oc.syncEgressIPs + syncFunc = h.oc.eIPC.syncEgressIPs case factory.EgressNodeType: - syncFunc = h.oc.initClusterEgressPolicies + syncFunc = h.oc.eIPC.initClusterEgressPolicies case factory.EgressIPPodType, factory.EgressIPType: diff --git a/go-controller/pkg/ovn/default_network_controller_policy.go b/go-controller/pkg/ovn/default_network_controller_policy.go index d1f40491e5..ee3f5b88ec 100644 --- a/go-controller/pkg/ovn/default_network_controller_policy.go +++ b/go-controller/pkg/ovn/default_network_controller_policy.go @@ -1,91 +1,8 @@ package ovn -import ( - "fmt" - - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" - libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" - libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" - - knet "k8s.io/api/networking/v1" -) - // WatchNetworkPolicy starts the watching of network policy resource and calls // back the appropriate handler logic func (oc *DefaultNetworkController) WatchNetworkPolicy() error { _, err := oc.retryNetworkPolicies.WatchResource() return err } - -func (oc *DefaultNetworkController) addHairpinAllowACL() error { - var v4Match, v6Match, match string - - if config.IPv4Mode { - v4Match = fmt.Sprintf("%s.src == %s", "ip4", config.Gateway.MasqueradeIPs.V4OVNServiceHairpinMasqueradeIP.String()) - match = v4Match - } - if config.IPv6Mode { - v6Match = fmt.Sprintf("%s.src == %s", "ip6", config.Gateway.MasqueradeIPs.V6OVNServiceHairpinMasqueradeIP.String()) - match = v6Match - } - if config.IPv4Mode && config.IPv6Mode { - match = fmt.Sprintf("(%s || %s)", v4Match, v6Match) - } - - ingressACLIDs := oc.getNetpolDefaultACLDbIDs(string(knet.PolicyTypeIngress)) - ingressACL := libovsdbutil.BuildACL(ingressACLIDs, types.DefaultAllowPriority, match, - nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportIngress) - - egressACLIDs := oc.getNetpolDefaultACLDbIDs(string(knet.PolicyTypeEgress)) - egressACL := libovsdbutil.BuildACL(egressACLIDs, types.DefaultAllowPriority, match, - nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportEgressAfterLB) - - ops, err := libovsdbops.CreateOrUpdateACLsOps(oc.nbClient, nil, ingressACL, egressACL) - if err != nil { - return fmt.Errorf("failed to create or update hairpin allow ACL %v", err) - } - - ops, err = libovsdbops.AddACLsToPortGroupOps(oc.nbClient, ops, oc.getClusterPortGroupName(types.ClusterPortGroupNameBase), - ingressACL, egressACL) - if err != nil { - return fmt.Errorf("failed to add ACL hairpin allow acl to port group: %v", err) - } - - _, err = libovsdbops.TransactAndCheck(oc.nbClient, ops) - if err != nil { - return err - } - - return nil -} - -func (oc *DefaultNetworkController) syncNetworkPolicies(networkPolicies []interface{}) error { - expectedPolicies := make(map[string]map[string]bool) - for _, npInterface := range networkPolicies { - policy, ok := npInterface.(*knet.NetworkPolicy) - if !ok { - return fmt.Errorf("spurious object in syncNetworkPolicies: %v", npInterface) - } - if nsMap, ok := expectedPolicies[policy.Namespace]; ok { - nsMap[policy.Name] = true - } else { - expectedPolicies[policy.Namespace] = map[string]bool{ - policy.Name: true, - } - } - } - err := 
oc.syncNetworkPoliciesCommon(expectedPolicies) - if err != nil { - return err - } - - // add default hairpin allow acl - err = oc.addHairpinAllowACL() - if err != nil { - return fmt.Errorf("failed to create allow hairping acl: %w", err) - } - - return nil -} diff --git a/go-controller/pkg/ovn/dns_name_resolver/dns_suite_test.go b/go-controller/pkg/ovn/dns_name_resolver/dns_suite_test.go index ba128dbe41..40b4c0a872 100644 --- a/go-controller/pkg/ovn/dns_name_resolver/dns_suite_test.go +++ b/go-controller/pkg/ovn/dns_name_resolver/dns_suite_test.go @@ -3,7 +3,7 @@ package dnsnameresolver import ( "testing" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/ovn/dns_name_resolver/external_dns.go b/go-controller/pkg/ovn/dns_name_resolver/external_dns.go index 8107222411..15f3ac3c72 100644 --- a/go-controller/pkg/ovn/dns_name_resolver/external_dns.go +++ b/go-controller/pkg/ovn/dns_name_resolver/external_dns.go @@ -62,7 +62,7 @@ func NewExternalEgressDNS( extEgDNS.dnsLister = ocpnetworklisterv1alpha1.NewDNSNameResolverLister(dnsSharedIndexInformer.GetIndexer()) dnsConfig := &controller.ControllerConfig[ocpnetworkapiv1alpha1.DNSNameResolver]{ - RateLimiter: workqueue.NewItemFastSlowRateLimiter(time.Second, 5*time.Second, 5), + RateLimiter: workqueue.NewTypedItemFastSlowRateLimiter[string](time.Second, 5*time.Second, 5), Informer: dnsSharedIndexInformer, Lister: extEgDNS.dnsLister.List, ObjNeedsUpdate: dnsNeedsUpdate, diff --git a/go-controller/pkg/ovn/dns_name_resolver/external_dns_test.go b/go-controller/pkg/ovn/dns_name_resolver/external_dns_test.go index d8afbc84a4..138aa4ad79 100644 --- a/go-controller/pkg/ovn/dns_name_resolver/external_dns_test.go +++ b/go-controller/pkg/ovn/dns_name_resolver/external_dns_test.go @@ -15,8 +15,7 @@ import ( libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" - "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/extensions/table" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -158,7 +157,7 @@ var _ = ginkgo.Describe("Egress Firewall External DNS Operations", func() { ipV6Mode ipMode = "ipv6" dualStackMode ipMode = "dual-stack" ) - table.DescribeTable("Should add addresses for different ip modes with cluster subnet/without cluster subnet", func(mode ipMode, addresses []string, ignoreClusterSubnet bool, expectedAddresses []string) { + ginkgo.DescribeTable("Should add addresses for different ip modes with cluster subnet/without cluster subnet", func(mode ipMode, addresses []string, ignoreClusterSubnet bool, expectedAddresses []string) { start() switch mode { @@ -183,15 +182,15 @@ var _ = ginkgo.Describe("Egress Firewall External DNS Operations", func() { expectDNSNameWithAddresses(extEgDNS, dnsName, expectedAddresses) }, - table.Entry("Should add IPv4 addresses", ipV4Mode, []string{"1.1.1.1", "2.2.2.2"}, true, []string{"1.1.1.1", "2.2.2.2"}), - table.Entry("Should add IPv6 address", ipV6Mode, []string{"2001:0db8:85a3:0000:0000:8a2e:0370:7334"}, true, []string{"2001:0db8:85a3:0000:0000:8a2e:0370:7334"}), - table.Entry("Should support dual stack", dualStackMode, []string{"1.1.1.1", "2.2.2.2", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"}, true, []string{"1.1.1.1", "2.2.2.2", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"}), - table.Entry("Should only add supported ipV4 addresses", ipV4Mode, []string{"1.1.1.1", "2.2.2.2", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"}, 
true, []string{"1.1.1.1", "2.2.2.2"}), - table.Entry("Should only add supported ipV6 addresses", ipV6Mode, []string{"1.1.1.1", "2.2.2.2", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"}, true, []string{"2001:0db8:85a3:0000:0000:8a2e:0370:7334"}), - table.Entry("Should not add IP addresses matching cluster subnet", ipV4Mode, []string{"10.128.0.1"}, true, []string{}), - table.Entry("Should add IP addresses matching cluster subnet", ipV4Mode, []string{"10.128.0.1"}, false, []string{"10.128.0.1"}), - table.Entry("Should not add IP addresses matching cluster subnet, should add other IPs", ipV4Mode, []string{"10.128.0.1", "1.1.1.1", "2.2.2.2"}, true, []string{"1.1.1.1", "2.2.2.2"}), - table.Entry("Should add IP addresses matching cluster subnet, should add other IPs also", ipV4Mode, []string{"10.128.0.1", "1.1.1.1", "2.2.2.2"}, false, []string{"10.128.0.1", "1.1.1.1", "2.2.2.2"}), + ginkgo.Entry("Should add IPv4 addresses", ipV4Mode, []string{"1.1.1.1", "2.2.2.2"}, true, []string{"1.1.1.1", "2.2.2.2"}), + ginkgo.Entry("Should add IPv6 address", ipV6Mode, []string{"2001:0db8:85a3:0000:0000:8a2e:0370:7334"}, true, []string{"2001:0db8:85a3:0000:0000:8a2e:0370:7334"}), + ginkgo.Entry("Should support dual stack", dualStackMode, []string{"1.1.1.1", "2.2.2.2", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"}, true, []string{"1.1.1.1", "2.2.2.2", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"}), + ginkgo.Entry("Should only add supported ipV4 addresses", ipV4Mode, []string{"1.1.1.1", "2.2.2.2", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"}, true, []string{"1.1.1.1", "2.2.2.2"}), + ginkgo.Entry("Should only add supported ipV6 addresses", ipV6Mode, []string{"1.1.1.1", "2.2.2.2", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"}, true, []string{"2001:0db8:85a3:0000:0000:8a2e:0370:7334"}), + ginkgo.Entry("Should not add IP addresses matching cluster subnet", ipV4Mode, []string{"10.128.0.1"}, true, []string{}), + ginkgo.Entry("Should add IP addresses matching cluster subnet", ipV4Mode, []string{"10.128.0.1"}, false, []string{"10.128.0.1"}), + ginkgo.Entry("Should not add IP addresses matching cluster subnet, should add other IPs", ipV4Mode, []string{"10.128.0.1", "1.1.1.1", "2.2.2.2"}, true, []string{"1.1.1.1", "2.2.2.2"}), + ginkgo.Entry("Should add IP addresses matching cluster subnet, should add other IPs also", ipV4Mode, []string{"10.128.0.1", "1.1.1.1", "2.2.2.2"}, false, []string{"10.128.0.1", "1.1.1.1", "2.2.2.2"}), ) }) diff --git a/go-controller/pkg/ovn/egressfirewall.go b/go-controller/pkg/ovn/egressfirewall.go index ca271ac025..16cd7c2535 100644 --- a/go-controller/pkg/ovn/egressfirewall.go +++ b/go-controller/pkg/ovn/egressfirewall.go @@ -476,7 +476,7 @@ func (oc *DefaultNetworkController) createEgressFirewallACLOps(ops []libovsdb.Op libovsdbutil.LportIngress, ) var err error - ops, err = libovsdbops.CreateOrUpdateACLsOps(oc.nbClient, ops, egressFirewallACL) + ops, err = libovsdbops.CreateOrUpdateACLsOps(oc.nbClient, ops, oc.GetSamplingConfig(), egressFirewallACL) if err != nil { return ops, fmt.Errorf("failed to create egressFirewall ACL %v: %v", egressFirewallACL, err) } @@ -748,7 +748,7 @@ func (oc *DefaultNetworkController) getEgressFirewallACLDbIDs(namespace string, func (oc *DefaultNetworkController) newEFNodeController(nodeInformer coreinformers.NodeInformer) controller.Controller { controllerConfig := &controller.ControllerConfig[kapi.Node]{ - RateLimiter: workqueue.NewItemFastSlowRateLimiter(time.Second, 5*time.Second, 5), + RateLimiter: workqueue.NewTypedItemFastSlowRateLimiter[string](time.Second, 5*time.Second, 
5), Informer: nodeInformer.Informer(), Lister: nodeInformer.Lister().List, ObjNeedsUpdate: oc.efNodeNeedsUpdate, diff --git a/go-controller/pkg/ovn/egressfirewall_test.go b/go-controller/pkg/ovn/egressfirewall_test.go index e3df4a6e93..1da304a1b7 100644 --- a/go-controller/pkg/ovn/egressfirewall_test.go +++ b/go-controller/pkg/ovn/egressfirewall_test.go @@ -9,8 +9,8 @@ import ( "time" "github.com/miekg/dns" - "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/extensions/table" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" "github.com/stretchr/testify/mock" "github.com/urfave/cli/v2" @@ -513,7 +513,7 @@ var _ = ginkgo.Describe("OVN EgressFirewall Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) - table.DescribeTable("correctly removes stale acl and DNS address set created", func(gwMode config.GatewayMode, oldDNS bool) { + ginkgo.DescribeTable("correctly removes stale acl and DNS address set created", func(gwMode config.GatewayMode, oldDNS bool) { if !oldDNS { // enable the dns name resolver flag. config.OVNKubernetesFeature.EnableDNSNameResolver = true @@ -552,8 +552,8 @@ var _ = ginkgo.Describe("OVN EgressFirewall Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }, - table.Entry(fmt.Sprintf("correctly removes stale acl and DNS address set created using old dns resolution, gateway mode %s", gwMode), gwMode, true), - table.Entry(fmt.Sprintf("correctly removes stale acl and DNS address set created using new dns resolution, gateway mode %s", gwMode), gwMode, false), + ginkgo.Entry(fmt.Sprintf("correctly removes stale acl and DNS address set created using old dns resolution, gateway mode %s", gwMode), gwMode, true), + ginkgo.Entry(fmt.Sprintf("correctly removes stale acl and DNS address set created using new dns resolution, gateway mode %s", gwMode), gwMode, false), ) }) ginkgo.Context("during execution", func() { @@ -883,9 +883,10 @@ var _ = ginkgo.Describe("OVN EgressFirewall Operations", func() { // make sure egress firewall acl was not updated as we are still holding a lock getACLs := func() int { - acls, err := libovsdbops.FindACLsWithPredicate(fakeOVN.nbClient, func(acl *nbdb.ACL) bool { - return true - }) + // find all existing egress firewall ACLs + predicateIDs := libovsdbops.NewDbObjectIDs(libovsdbops.ACLEgressFirewall, "default-network-controller", nil) + aclP := libovsdbops.GetPredicate[*nbdb.ACL](predicateIDs, nil) + acls, err := libovsdbops.FindACLsWithPredicate(fakeOVN.nbClient, aclP) gomega.Expect(err).NotTo(gomega.HaveOccurred()) return len(acls) } @@ -1281,7 +1282,7 @@ var _ = ginkgo.Describe("OVN EgressFirewall Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) - table.DescribeTable("correctly cleans up object that failed to be created", func(gwMode config.GatewayMode, oldDNS bool) { + ginkgo.DescribeTable("correctly cleans up object that failed to be created", func(gwMode config.GatewayMode, oldDNS bool) { config.Gateway.Mode = gwMode if !oldDNS { // enable the dns name resolver flag. 
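The test churn in this file is part of the Ginkgo v2 migration: the v1 `extensions/table` package was folded into the core `github.com/onsi/ginkgo/v2` package, so `table.DescribeTable`/`table.Entry` become `ginkgo.DescribeTable`/`ginkgo.Entry` with otherwise unchanged semantics. A minimal sketch of the resulting pattern (illustrative spec, not taken from this patch):

```go
package example_test

import (
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

// Assumes the usual suite bootstrap (gomega.RegisterFailHandler, ginkgo.RunSpecs)
// exists elsewhere in the package, as it does in these test files.
var _ = ginkgo.Describe("table-driven specs", func() {
	// DescribeTable runs the body once per Entry, passing the Entry's
	// parameters as the body's arguments.
	ginkgo.DescribeTable("addition",
		func(a, b, want int) {
			gomega.Expect(a + b).To(gomega.Equal(want))
		},
		ginkgo.Entry("small numbers", 1, 2, 3),
		ginkgo.Entry("zero", 0, 0, 0),
	)
})
```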
@@ -1356,10 +1357,10 @@ var _ = ginkgo.Describe("OVN EgressFirewall Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }, - table.Entry(fmt.Sprintf("correctly cleans up object that failed to be created using old dns resolution, gateway mode %s", gwMode), gwMode, true), - table.Entry(fmt.Sprintf("correctly cleans up object that failed to be created using new dns resolution, gateway mode %s", gwMode), gwMode, false), + ginkgo.Entry(fmt.Sprintf("correctly cleans up object that failed to be created using old dns resolution, gateway mode %s", gwMode), gwMode, true), + ginkgo.Entry(fmt.Sprintf("correctly cleans up object that failed to be created using new dns resolution, gateway mode %s", gwMode), gwMode, false), ) - table.DescribeTable("correctly creates egress firewall using different dns resolution methods, dns name types and ip families", func(gwMode config.GatewayMode, oldDNS bool, dnsName, resolvedIP string) { + ginkgo.DescribeTable("correctly creates egress firewall using different dns resolution methods, dns name types and ip families", func(gwMode config.GatewayMode, oldDNS bool, dnsName, resolvedIP string) { if !oldDNS { // enable the dns name resolver flag. config.OVNKubernetesFeature.EnableDNSNameResolver = true @@ -1427,12 +1428,12 @@ var _ = ginkgo.Describe("OVN EgressFirewall Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }, - table.Entry(fmt.Sprintf("correctly creates egress firewall using old dns resolution for regular DNS name with IPv4 address, gateway mode %s", gwMode), gwMode, true, "a.b.c", "2.2.2.2"), - table.Entry(fmt.Sprintf("correctly creates egress firewall using new dns resolution for regular DNS name with IPv4 address, gateway mode %s", gwMode), gwMode, false, "a.b.c", "2.2.2.2"), - table.Entry(fmt.Sprintf("correctly creates egress firewall using old dns resolution for regular DNS name with IPv6 address, gateway mode %s", gwMode), gwMode, true, "a.b.c", "2002::1234:abcd:ffff:c0a8:101"), - table.Entry(fmt.Sprintf("correctly creates egress firewall using new dns resolution for regular DNS name with IPv6 address, gateway mode %s", gwMode), gwMode, false, "a.b.c", "2002::1234:abcd:ffff:c0a8:101"), - table.Entry(fmt.Sprintf("correctly creates egress firewall using new dns resolution for wildcard DNS name with IPv4 address, gateway mode %s", gwMode), gwMode, false, "*.b.c", "2.2.2.2"), - table.Entry(fmt.Sprintf("correctly creates egress firewall using new dns resolution for wildcard DNS name with IPv6 address, gateway mode %s", gwMode), gwMode, false, "*.b.c", "2002::1234:abcd:ffff:c0a8:101"), + ginkgo.Entry(fmt.Sprintf("correctly creates egress firewall using old dns resolution for regular DNS name with IPv4 address, gateway mode %s", gwMode), gwMode, true, "a.b.c", "2.2.2.2"), + ginkgo.Entry(fmt.Sprintf("correctly creates egress firewall using new dns resolution for regular DNS name with IPv4 address, gateway mode %s", gwMode), gwMode, false, "a.b.c", "2.2.2.2"), + ginkgo.Entry(fmt.Sprintf("correctly creates egress firewall using old dns resolution for regular DNS name with IPv6 address, gateway mode %s", gwMode), gwMode, true, "a.b.c", "2002::1234:abcd:ffff:c0a8:101"), + ginkgo.Entry(fmt.Sprintf("correctly creates egress firewall using new dns resolution for regular DNS name with IPv6 address, gateway mode %s", gwMode), gwMode, false, "a.b.c", "2002::1234:abcd:ffff:c0a8:101"), + ginkgo.Entry(fmt.Sprintf("correctly creates egress firewall using new dns 
resolution for wildcard DNS name with IPv4 address, gateway mode %s", gwMode), gwMode, false, "*.b.c", "2.2.2.2"), + ginkgo.Entry(fmt.Sprintf("correctly creates egress firewall using new dns resolution for wildcard DNS name with IPv6 address, gateway mode %s", gwMode), gwMode, false, "*.b.c", "2002::1234:abcd:ffff:c0a8:101"), ) }) } diff --git a/go-controller/pkg/ovn/egressgw.go b/go-controller/pkg/ovn/egressgw.go index a47f553a60..d76c0613ff 100644 --- a/go-controller/pkg/ovn/egressgw.go +++ b/go-controller/pkg/ovn/egressgw.go @@ -12,6 +12,7 @@ import ( libovsdbclient "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" @@ -580,7 +581,8 @@ func (oc *DefaultNetworkController) deletePodSNAT(nodeName string, extIPs, podIP klog.V(4).Infof("Node %s is not in the local zone %s", nodeName, oc.zone) return nil } - ops, err := deletePodSNATOps(oc.nbClient, nil, oc.GetNetworkScopedGWRouterName(nodeName), extIPs, podIPNets) + // Default network does not set any matches in Pod SNAT + ops, err := deletePodSNATOps(oc.nbClient, nil, oc.GetNetworkScopedGWRouterName(nodeName), extIPs, podIPNets, "") if err != nil { return err } @@ -593,7 +595,7 @@ func (oc *DefaultNetworkController) deletePodSNAT(nodeName string, extIPs, podIP } // buildPodSNAT builds per pod SNAT rules towards the nodeIP that are applied to the GR where the pod resides -func buildPodSNAT(extIPs, podIPNets []*net.IPNet) ([]*nbdb.NAT, error) { +func buildPodSNAT(extIPs, podIPNets []*net.IPNet, match string) ([]*nbdb.NAT, error) { nats := make([]*nbdb.NAT, 0, len(extIPs)*len(podIPNets)) for _, podIPNet := range podIPNets { fullMaskPodNet := &net.IPNet{ @@ -601,13 +603,13 @@ func buildPodSNAT(extIPs, podIPNets []*net.IPNet) ([]*nbdb.NAT, error) { Mask: util.GetIPFullMask(podIPNet.IP), } if len(extIPs) == 0 { - nats = append(nats, libovsdbops.BuildSNAT(nil, fullMaskPodNet, "", nil)) + nats = append(nats, libovsdbops.BuildSNATWithMatch(nil, fullMaskPodNet, "", nil, match)) } else { for _, gwIPNet := range extIPs { if utilnet.IsIPv6CIDR(gwIPNet) != utilnet.IsIPv6CIDR(podIPNet) { continue } - nats = append(nats, libovsdbops.BuildSNAT(&gwIPNet.IP, fullMaskPodNet, "", nil)) + nats = append(nats, libovsdbops.BuildSNATWithMatch(&gwIPNet.IP, fullMaskPodNet, "", nil, match)) } } } @@ -630,8 +632,8 @@ func getExternalIPsGR(watchFactory *factory.WatchFactory, nodeName string) ([]*n // deletePodSNATOps creates ovsdb operation that removes per pod SNAT rules towards the nodeIP that are applied to the GR where the pod resides // used when disableSNATMultipleGWs=true -func deletePodSNATOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, gwRouterName string, extIPs, podIPNets []*net.IPNet) ([]ovsdb.Operation, error) { - nats, err := buildPodSNAT(extIPs, podIPNets) +func deletePodSNATOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, gwRouterName string, extIPs, podIPNets []*net.IPNet, match string) ([]ovsdb.Operation, error) { + nats, err := buildPodSNAT(extIPs, podIPNets, match) if err != nil { return nil, err } @@ -648,7 +650,7 @@ func deletePodSNATOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, gwR // addOrUpdatePodSNAT adds or updates per pod SNAT rules towards the nodeIP that are applied to the GR where the pod resides // used when disableSNATMultipleGWs=true func addOrUpdatePodSNAT(nbClient 
libovsdbclient.Client, gwRouterName string, extIPs, podIfAddrs []*net.IPNet) error { - nats, err := buildPodSNAT(extIPs, podIfAddrs) + nats, err := buildPodSNAT(extIPs, podIfAddrs, "") if err != nil { return err } @@ -664,9 +666,9 @@ func addOrUpdatePodSNAT(nbClient libovsdbclient.Client, gwRouterName string, ext // addOrUpdatePodSNATOps returns the operation that adds or updates per pod SNAT rules towards the nodeIP that are // applied to the GR where the pod resides // used when disableSNATMultipleGWs=true -func addOrUpdatePodSNATOps(nbClient libovsdbclient.Client, gwRouterName string, extIPs, podIfAddrs []*net.IPNet, ops []ovsdb.Operation) ([]ovsdb.Operation, error) { +func addOrUpdatePodSNATOps(nbClient libovsdbclient.Client, gwRouterName string, extIPs, podIfAddrs []*net.IPNet, match string, ops []ovsdb.Operation) ([]ovsdb.Operation, error) { router := &nbdb.LogicalRouter{Name: gwRouterName} - nats, err := buildPodSNAT(extIPs, podIfAddrs) + nats, err := buildPodSNAT(extIPs, podIfAddrs, match) if err != nil { return nil, err } diff --git a/go-controller/pkg/ovn/egressgw_test.go b/go-controller/pkg/ovn/egressgw_test.go index df7267d479..a4c0aff4c7 100644 --- a/go-controller/pkg/ovn/egressgw_test.go +++ b/go-controller/pkg/ovn/egressgw_test.go @@ -24,8 +24,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" - "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/extensions/table" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/urfave/cli/v2" ) @@ -61,7 +60,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { ginkgo.Context("on setting namespace gateway annotations", func() { - table.DescribeTable("reconciles an new pod with namespace single exgw annotation already set", func(bfd bool, finalNB []libovsdbtest.TestData) { + ginkgo.DescribeTable("reconciles an new pod with namespace single exgw annotation already set", func(bfd bool, finalNB []libovsdbtest.TestData) { app.Action = func(ctx *cli.Context) error { namespaceT := *newNamespace(namespaceName) @@ -125,7 +124,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }, table.Entry("No BFD", false, []libovsdbtest.TestData{ + }, ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -161,7 +160,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { StaticRoutes: []string{"static-route-1-UUID"}, }, }), - table.Entry("BFD Enabled", true, []libovsdbtest.TestData{ + ginkgo.Entry("BFD Enabled", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -204,7 +203,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, })) - table.DescribeTable("reconciles an new pod with namespace single exgw annotation already set with pod event first", func(bfd bool, finalNB []libovsdbtest.TestData) { + ginkgo.DescribeTable("reconciles an new pod with namespace single exgw annotation already set with pod event first", func(bfd bool, finalNB []libovsdbtest.TestData) { app.Action = func(ctx *cli.Context) error { namespaceT := *newNamespace(namespaceName) @@ -265,7 +264,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }, 
table.Entry("No BFD", false, []libovsdbtest.TestData{ + }, ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -301,7 +300,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { StaticRoutes: []string{"static-route-1-UUID"}, }, }), - table.Entry("BFD Enabled", true, []libovsdbtest.TestData{ + ginkgo.Entry("BFD Enabled", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -344,7 +343,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, })) - table.DescribeTable("reconciles an new pod with namespace double exgw annotation already set", func(bfd bool, finalNB []libovsdbtest.TestData) { + ginkgo.DescribeTable("reconciles an new pod with namespace double exgw annotation already set", func(bfd bool, finalNB []libovsdbtest.TestData) { app.Action = func(ctx *cli.Context) error { @@ -409,7 +408,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }, - table.Entry("No BFD", false, []libovsdbtest.TestData{ + ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -455,7 +454,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { StaticRoutes: []string{"static-route-1-UUID", "static-route-2-UUID"}, }, }), - table.Entry("BFD Enabled", true, []libovsdbtest.TestData{ + ginkgo.Entry("BFD Enabled", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -515,7 +514,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }), ) - table.DescribeTable("reconciles deleting a pod with namespace double exgw annotation already set", + ginkgo.DescribeTable("reconciles deleting a pod with namespace double exgw annotation already set", func(bfd bool, initNB []libovsdbtest.TestData, finalNB []libovsdbtest.TestData, @@ -576,7 +575,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }, - table.Entry("No BFD", false, + ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitch{ UUID: "node1", @@ -620,7 +619,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, }, ), - table.Entry("BFD", true, + ginkgo.Entry("BFD", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitch{ UUID: "node1", @@ -678,7 +677,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { ), ) - table.DescribeTable("reconciles deleting a pod with namespace double exgw annotation already set IPV6", + ginkgo.DescribeTable("reconciles deleting a pod with namespace double exgw annotation already set IPV6", func(bfd bool, initNB []libovsdbtest.TestData, finalNB []libovsdbtest.TestData) { @@ -737,7 +736,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }, - table.Entry("BFD IPV6", true, []libovsdbtest.TestData{ + ginkgo.Entry("BFD IPV6", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitch{ UUID: "node1", Name: "node1", @@ -793,7 +792,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { ), ) - table.DescribeTable("reconciles deleting a exgw namespace with active pod", + ginkgo.DescribeTable("reconciles 
deleting a exgw namespace with active pod", func(bfd bool, initNB []libovsdbtest.TestData, finalNB []libovsdbtest.TestData, @@ -855,7 +854,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }, - table.Entry("No BFD", false, + ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitch{ UUID: "node1", @@ -914,7 +913,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, }, ), - table.Entry("BFD", true, + ginkgo.Entry("BFD", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitch{ UUID: "node1", @@ -992,7 +991,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { namespace2Name = "namespace2" gwPodName = "gwPod" ) - table.DescribeTable("reconciles a host networked pod acting as a exgw for another namespace for new pod", func(bfd bool, finalNB []libovsdbtest.TestData) { + ginkgo.DescribeTable("reconciles a host networked pod acting as a exgw for another namespace for new pod", func(bfd bool, finalNB []libovsdbtest.TestData) { app.Action = func(ctx *cli.Context) error { namespaceT := *newNamespace(namespaceName) @@ -1067,7 +1066,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }, table.Entry("No BFD", false, []libovsdbtest.TestData{ + }, ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -1107,7 +1106,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { StaticRoutes: []string{"static-route-1-UUID"}, }, }), - table.Entry("BFD Enabled", true, []libovsdbtest.TestData{ + ginkgo.Entry("BFD Enabled", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -1154,7 +1153,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, })) - table.DescribeTable("reconciles a host networked pod acting as a exgw for another namespace for existing pod", func(bfd bool, finalNB []libovsdbtest.TestData) { + ginkgo.DescribeTable("reconciles a host networked pod acting as a exgw for another namespace for existing pod", func(bfd bool, finalNB []libovsdbtest.TestData) { app.Action = func(ctx *cli.Context) error { namespaceT := *newNamespace(namespaceName) @@ -1227,7 +1226,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }, table.Entry("No BFD", false, []libovsdbtest.TestData{ + }, ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -1267,7 +1266,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { StaticRoutes: []string{"static-route-1-UUID"}, }, }), - table.Entry("BFD Enabled", true, []libovsdbtest.TestData{ + ginkgo.Entry("BFD Enabled", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -1314,7 +1313,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, })) - table.DescribeTable("reconciles a multus networked pod acting as a exgw for another namespace for new pod", func(bfd bool, finalNB []libovsdbtest.TestData) { + ginkgo.DescribeTable("reconciles a multus networked pod acting as a exgw for another namespace for new pod", func(bfd bool, finalNB 
[]libovsdbtest.TestData) { app.Action = func(ctx *cli.Context) error { ns := nettypes.NetworkStatus{Name: "dummy", IPs: []string{"11.0.0.1"}} networkStatuses := []nettypes.NetworkStatus{ns} @@ -1397,7 +1396,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }, table.Entry("No BFD", false, []libovsdbtest.TestData{ + }, ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -1437,7 +1436,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { StaticRoutes: []string{"static-route-1-UUID"}, }, }), - table.Entry("BFD Enabled", true, []libovsdbtest.TestData{ + ginkgo.Entry("BFD Enabled", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -1484,7 +1483,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, })) - table.DescribeTable("reconciles deleting a host networked pod acting as a exgw for another namespace for existing pod", + ginkgo.DescribeTable("reconciles deleting a host networked pod acting as a exgw for another namespace for existing pod", func(bfd bool, beforeDeleteNB []libovsdbtest.TestData, afterDeleteNB []libovsdbtest.TestData, @@ -1576,7 +1575,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }, - table.Entry("No BFD", false, + ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", @@ -1650,7 +1649,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { "", &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{}, ), - table.Entry("BFD Enabled", true, []libovsdbtest.TestData{ + ginkgo.Entry("BFD Enabled", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -1729,7 +1728,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { "", &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{}, ), - table.Entry("No BFD and with overlapping APB External Route CR and annotation", false, + ginkgo.Entry("No BFD and with overlapping APB External Route CR and annotation", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", @@ -2412,7 +2411,7 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) - table.DescribeTable("should keep the hybrid route policy after deleting the namespace gateway annotation when there is an APB External Route CR overlapping the same external gateway IP", func(legacyFirst bool) { + ginkgo.DescribeTable("should keep the hybrid route policy after deleting the namespace gateway annotation when there is an APB External Route CR overlapping the same external gateway IP", func(legacyFirst bool) { app.Action = func(ctx *cli.Context) error { config.Gateway.Mode = config.GatewayModeLocal @@ -2578,8 +2577,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }, - table.Entry("when APBRoute handles first", false), - table.Entry("when external_gw handles first", true)) + ginkgo.Entry("when APBRoute handles first", false), + ginkgo.Entry("when external_gw handles first", true)) ginkgo.It("should create a single 
policy for concurrent addHybridRoutePolicy for the same node", func() { app.Action = func(ctx *cli.Context) error { diff --git a/go-controller/pkg/ovn/egressip.go b/go-controller/pkg/ovn/egressip.go index 217e4efbe8..f160840d95 100644 --- a/go-controller/pkg/ovn/egressip.go +++ b/go-controller/pkg/ovn/egressip.go @@ -1,6 +1,7 @@ package ovn import ( + "context" "encoding/json" "errors" "fmt" @@ -14,27 +15,35 @@ import ( libovsdbclient "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/libovsdb/ovsdb" libovsdb "github.com/ovn-org/libovsdb/ovsdb" + + ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" egressipv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + nad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" egresssvc "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/egressservice" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/udnenabledsvc" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/syncmap" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors" + cnitypes "github.com/containernetworking/cni/pkg/types" + corev1 "k8s.io/api/core/v1" kapi "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" "k8s.io/klog/v2" utilnet "k8s.io/utils/net" @@ -49,38 +58,172 @@ const ( NodeIPAddrSetName egressIPAddrSetName = "node-ips" EgressIPServedPodsAddrSetName egressIPAddrSetName = "egressip-served-pods" // the possible values for LRP DB objects for EIPs - IPFamilyValueV4 egressIPFamilyValue = "ip4" - IPFamilyValueV6 egressIPFamilyValue = "ip6" - IPFamilyValue egressIPFamilyValue = "ip" // use it when its dualstack - ReplyTrafficNoReroute egressIPNoReroutePolicyName = "EIP-No-Reroute-reply-traffic" - ReplyTrafficMark egressIPQoSRuleName = "EgressIP-Mark-Reply-Traffic" + IPFamilyValueV4 egressIPFamilyValue = "ip4" + IPFamilyValueV6 egressIPFamilyValue = "ip6" + IPFamilyValue egressIPFamilyValue = "ip" // use it when it's dualstack + ReplyTrafficNoReroute egressIPNoReroutePolicyName = "EIP-No-Reroute-reply-traffic" + NoReRoutePodToPod egressIPNoReroutePolicyName = "EIP-No-Reroute-Pod-To-Pod" + NoReRoutePodToJoin egressIPNoReroutePolicyName = "EIP-No-Reroute-Pod-To-Join" + NoReRoutePodToNode egressIPNoReroutePolicyName = "EIP-No-Reroute-Pod-To-Node" + NoReRouteUDNPodToCDNSvc egressIPNoReroutePolicyName = "EIP-No-Reroute-Pod-To-CDN-Svc" + ReplyTrafficMark egressIPQoSRuleName = "EgressIP-Mark-Reply-Traffic" + dbIDEIPNamePodDivider = "_" ) -func getEgressIPAddrSetDbIDs(name egressIPAddrSetName, controller
string) *libovsdbops.DbObjectIDs { +func getEgressIPAddrSetDbIDs(name egressIPAddrSetName, network, controller string) *libovsdbops.DbObjectIDs { return libovsdbops.NewDbObjectIDs(libovsdbops.AddressSetEgressIP, controller, map[libovsdbops.ExternalIDKey]string{ // egress ip creates cluster-wide address sets with egressIpAddrSetName libovsdbops.ObjectNameKey: string(name), + libovsdbops.NetworkKey: network, + }) +} + +func getEgressIPLRPReRouteDbIDs(egressIPName, podNamespace, podName string, ipFamily egressIPFamilyValue, network, controller string) *libovsdbops.DbObjectIDs { + return libovsdbops.NewDbObjectIDs(libovsdbops.LogicalRouterPolicyEgressIP, controller, map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: fmt.Sprintf("%s%s%s/%s", egressIPName, dbIDEIPNamePodDivider, podNamespace, podName), + libovsdbops.PriorityKey: fmt.Sprintf("%d", types.EgressIPReroutePriority), + libovsdbops.IPFamilyKey: string(ipFamily), + libovsdbops.NetworkKey: network, + }) +} + +func getEIPLRPObjK8MetaData(externalIDs map[string]string) (string, string) { + objMetaDataRaw := externalIDs[libovsdbops.ObjectNameKey.String()] + if objMetaDataRaw == "" || !strings.Contains(objMetaDataRaw, "_") || !strings.Contains(objMetaDataRaw, "/") { + return "", "" + } + objMetaDataSplit := strings.Split(objMetaDataRaw, "_") + if len(objMetaDataSplit) != 2 { + return "", "" + } + return objMetaDataSplit[0], objMetaDataSplit[1] // EgressIP name and "podNamespace/podName" +} + +func getEgressIPLRPNoReRoutePodToJoinDbIDs(ipFamily egressIPFamilyValue, network, controller string) *libovsdbops.DbObjectIDs { + return libovsdbops.NewDbObjectIDs(libovsdbops.LogicalRouterPolicyEgressIP, controller, map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: string(NoReRoutePodToJoin), + libovsdbops.PriorityKey: fmt.Sprintf("%d", types.DefaultNoRereoutePriority), + libovsdbops.IPFamilyKey: string(ipFamily), + libovsdbops.NetworkKey: network, + }) +} + +func getEgressIPLRPNoReRoutePodToPodDbIDs(ipFamily egressIPFamilyValue, network, controller string) *libovsdbops.DbObjectIDs { + return libovsdbops.NewDbObjectIDs(libovsdbops.LogicalRouterPolicyEgressIP, controller, map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: string(NoReRoutePodToPod), + libovsdbops.PriorityKey: fmt.Sprintf("%d", types.DefaultNoRereoutePriority), + libovsdbops.IPFamilyKey: string(ipFamily), + libovsdbops.NetworkKey: network, + }) +} + +func getEgressIPLRPNoReRoutePodToNodeDbIDs(ipFamily egressIPFamilyValue, network, controller string) *libovsdbops.DbObjectIDs { + return libovsdbops.NewDbObjectIDs(libovsdbops.LogicalRouterPolicyEgressIP, controller, map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: string(NoReRoutePodToNode), + libovsdbops.PriorityKey: fmt.Sprintf("%d", types.DefaultNoRereoutePriority), + libovsdbops.IPFamilyKey: string(ipFamily), + libovsdbops.NetworkKey: network, }) } -func getEgressIPLRPNoReRouteDbIDs(priority int, uniqueName egressIPNoReroutePolicyName, ipFamily egressIPFamilyValue) *libovsdbops.DbObjectIDs { - return libovsdbops.NewDbObjectIDs(libovsdbops.LogicalRouterPolicyEgressIP, DefaultNetworkControllerName, map[libovsdbops.ExternalIDKey]string{ +func getEgressIPLRPNoReRouteDbIDs(priority int, uniqueName egressIPNoReroutePolicyName, ipFamily egressIPFamilyValue, network, controller string) *libovsdbops.DbObjectIDs { + return libovsdbops.NewDbObjectIDs(libovsdbops.LogicalRouterPolicyEgressIP, controller, map[libovsdbops.ExternalIDKey]string{ // egress ip creates global no-reroute 
policies at 102 priority libovsdbops.ObjectNameKey: string(uniqueName), libovsdbops.PriorityKey: fmt.Sprintf("%d", priority), libovsdbops.IPFamilyKey: string(ipFamily), + libovsdbops.NetworkKey: network, }) } -func getEgressIPQoSRuleDbIDs(ipFamily egressIPFamilyValue) *libovsdbops.DbObjectIDs { - return libovsdbops.NewDbObjectIDs(libovsdbops.LogicalRouterPolicyEgressIP, DefaultNetworkControllerName, map[libovsdbops.ExternalIDKey]string{ +func getEgressIPQoSRuleDbIDs(ipFamily egressIPFamilyValue, network, controller string) *libovsdbops.DbObjectIDs { + return libovsdbops.NewDbObjectIDs(libovsdbops.LogicalRouterPolicyEgressIP, controller, map[libovsdbops.ExternalIDKey]string{ // egress ip creates reply traffic marker rule at 103 priority libovsdbops.ObjectNameKey: string(ReplyTrafficMark), libovsdbops.PriorityKey: fmt.Sprintf("%d", types.EgressIPRerouteQoSRulePriority), libovsdbops.IPFamilyKey: string(ipFamily), + libovsdbops.NetworkKey: network, + }) +} + +func getEgressIPLRPSNATMarkDbIDs(eIPName, podNamespace, podName string, ipFamily egressIPFamilyValue, network, controller string) *libovsdbops.DbObjectIDs { + return libovsdbops.NewDbObjectIDs(libovsdbops.LogicalRouterPolicyEgressIP, controller, map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: fmt.Sprintf("%s_%s/%s", eIPName, podNamespace, podName), + libovsdbops.PriorityKey: fmt.Sprintf("%d", types.EgressIPSNATMarkPriority), + libovsdbops.IPFamilyKey: string(ipFamily), + libovsdbops.NetworkKey: network, + }) +} + +func getEgressIPNATDbIDs(eIPName, podNamespace, podName string, ipFamily egressIPFamilyValue, controller string) *libovsdbops.DbObjectIDs { + return libovsdbops.NewDbObjectIDs(libovsdbops.NATEgressIP, controller, map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: fmt.Sprintf("%s_%s/%s", eIPName, podNamespace, podName), + libovsdbops.IPFamilyKey: string(ipFamily), }) } +// EgressIPController configures OVN to support EgressIP +type EgressIPController struct { + // libovsdb northbound client interface + nbClient libovsdbclient.Client + kube *kube.KubeOVN + watchFactory *factory.WatchFactory + // event recorder used to post events to k8s + recorder record.EventRecorder + // podAssignmentMutex is used to ensure safe access to podAssignment. + // Currently WatchEgressIP, WatchEgressNamespace and WatchEgressPod could + // all access that map simultaneously, hence why this guard is needed. + podAssignmentMutex *sync.Mutex + // nodeUpdateMutex is used for two reasons: + // (1) to ensure safe handling of node ip address updates. VIP addresses are + // dynamic and might move across nodes. + // (2) used in ensureDefaultNoRerouteQoSRules function to ensure + // creating QoS rules is thread safe since otherwise when two nodes are added + // at the same time by two different threads we end up creating duplicate + // QoS rules in database due to libovsdb cache race + nodeUpdateMutex *sync.Mutex + // podAssignment is a cache used for keeping track of which egressIP status + // has been setup for each pod. 
The key is defined by getPodKey + podAssignment map[string]*podAssignmentState + // logicalPortCache allows access to pod IPs for all networks + logicalPortCache *PortCache + // A cache that maintains all nodes in the cluster, + // value will be true if local to this zone and false otherwise + nodeZoneState *syncmap.SyncMap[bool] + // nadController used for getting network information for UDNs + nadController nad.NADController + // An address set factory that creates address sets + addressSetFactory addressset.AddressSetFactory + // Northbound database zone name to which this Controller is connected - aka local zone + zone string + v4 bool + v6 bool + // controllerName is the name of the controller. For backward compatibility reasons, this is the default network controller name. + controllerName string +} + +func NewEIPController(nbClient libovsdbclient.Client, kube *kube.KubeOVN, watchFactory *factory.WatchFactory, recorder record.EventRecorder, + portCache *PortCache, nadController nad.NADController, addressSetFactory addressset.AddressSetFactory, v4, v6 bool, zone, controllerName string) *EgressIPController { + e := &EgressIPController{ + nbClient: nbClient, + kube: kube, + watchFactory: watchFactory, + recorder: recorder, + podAssignmentMutex: &sync.Mutex{}, + nodeUpdateMutex: &sync.Mutex{}, + podAssignment: map[string]*podAssignmentState{}, + logicalPortCache: portCache, + nodeZoneState: syncmap.NewSyncMap[bool](), + controllerName: controllerName, + nadController: nadController, + addressSetFactory: addressSetFactory, + zone: zone, + v4: v4, + v6: v6, + } + return e +} + // main reconcile functions begin here // reconcileEgressIP reconciles the database configuration @@ -100,21 +243,26 @@ func getEgressIPQoSRuleDbIDs(ipFamily egressIPFamilyValue) *libovsdbops.DbObject // NOTE: `Spec.EgressIPs` updates for EIP object are not processed here, that is the job of cluster manager // // We only care about `Spec.NamespaceSelector`, `Spec.PodSelector` and `Status` field -func (oc *DefaultNetworkController) reconcileEgressIP(old, new *egressipv1.EgressIP) (err error) { // CASE 1: EIP object deletion, we need to teardown database configuration for all the statuses if old != nil && new == nil { removeStatus := old.Status.Items if len(removeStatus) > 0 { - if err := oc.deleteEgressIPAssignments(old.Name, removeStatus); err != nil { + if err := e.deleteEgressIPAssignments(old.Name, removeStatus); err != nil { return err } } } + var mark util.EgressIPMark + if new != nil { + mark = getEgressIPPktMark(new.Name, new.Annotations) + } + // CASE 2: EIP object addition, we need to setup database configuration for all the statuses if old == nil && new != nil { addStatus := new.Status.Items if len(addStatus) > 0 { - if err := oc.addEgressIPAssignments(new.Name, addStatus, new.Spec.NamespaceSelector, new.Spec.PodSelector); err != nil { + if err := e.addEgressIPAssignments(new.Name, addStatus, mark, new.Spec.NamespaceSelector, new.Spec.PodSelector); err != nil { return err } } @@ -145,7 +293,7 @@ func (oc *DefaultNetworkController) reconcileEgressIP(old, new *egressipv1.Egres statusToDelete = append(statusToDelete, oldStatus) } if len(statusToDelete) > 0 { - if err := oc.deleteEgressIPAssignments(old.Name, statusToDelete); err != nil { + if err := e.deleteEgressIPAssignments(old.Name, statusToDelete); err != nil { return err } } @@ -158,7 +306,7 @@ func (oc *DefaultNetworkController) reconcileEgressIP(old, new
*egressipv1.Egres statusToAdd = append(statusToAdd, newStatus) } if len(statusToAdd) > 0 { - if err := oc.addEgressIPAssignments(new.Name, statusToAdd, new.Spec.NamespaceSelector, new.Spec.PodSelector); err != nil { + if err := e.addEgressIPAssignments(new.Name, statusToAdd, mark, new.Spec.NamespaceSelector, new.Spec.PodSelector); err != nil { return err } } @@ -185,20 +333,28 @@ func (oc *DefaultNetworkController) reconcileEgressIP(old, new *egressipv1.Egres // matching the old and not matching the new, and add setup for the pod // matching the new and which didn't match the old. if !reflect.DeepEqual(newNamespaceSelector, oldNamespaceSelector) && reflect.DeepEqual(newPodSelector, oldPodSelector) { - namespaces, err := oc.watchFactory.GetNamespaces() + namespaces, err := e.watchFactory.GetNamespaces() if err != nil { return err } for _, namespace := range namespaces { namespaceLabels := labels.Set(namespace.Labels) if !newNamespaceSelector.Matches(namespaceLabels) && oldNamespaceSelector.Matches(namespaceLabels) { - if err := oc.deleteNamespaceEgressIPAssignment(oldEIP.Name, oldEIP.Status.Items, namespace, oldEIP.Spec.PodSelector); err != nil { - return err + ni, err := e.nadController.GetActiveNetworkForNamespace(namespace.Name) + if err != nil { + return fmt.Errorf("failed to get active network for namespace %s: %v", namespace.Name, err) + } + if err := e.deleteNamespaceEgressIPAssignment(ni, oldEIP.Name, oldEIP.Status.Items, namespace, oldEIP.Spec.PodSelector); err != nil { + return fmt.Errorf("network %s: failed to delete namespace %s egress IP config: %v", ni.GetNetworkName(), namespace.Name, err) } } if newNamespaceSelector.Matches(namespaceLabels) && !oldNamespaceSelector.Matches(namespaceLabels) { - if err := oc.addNamespaceEgressIPAssignments(newEIP.Name, newEIP.Status.Items, namespace, newEIP.Spec.PodSelector); err != nil { - return err + ni, err := e.nadController.GetActiveNetworkForNamespace(namespace.Name) + if err != nil { + return fmt.Errorf("failed to get active network for namespace %s: %v", namespace.Name, err) + } + if err := e.addNamespaceEgressIPAssignments(ni, newEIP.Name, newEIP.Status.Items, mark, namespace, newEIP.Spec.PodSelector); err != nil { + return fmt.Errorf("network %s: failed to add namespace %s egress IP config: %v", ni.GetNetworkName(), namespace.Name, err) } } } @@ -207,28 +363,36 @@ func (oc *DefaultNetworkController) reconcileEgressIP(old, new *egressipv1.Egres // matching the old and not matching the new, and add setup for the pod // matching the new and which didn't match the old. 
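The namespace-selector branch above reduces to a label-transition test per namespace; here is a minimal, self-contained sketch of that rule (hypothetical selectors and labels, not patch code):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Hypothetical old and new namespace selectors of one EgressIP object.
	oldSel, _ := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"team": "red"}})
	newSel, _ := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"team": "blue"}})

	nsLabels := labels.Set{"team": "red"} // labels of one namespace being reconciled
	switch {
	case oldSel.Matches(nsLabels) && !newSel.Matches(nsLabels):
		fmt.Println("namespace stopped matching: tear down its egress IP config")
	case !oldSel.Matches(nsLabels) && newSel.Matches(nsLabels):
		fmt.Println("namespace started matching: set up its egress IP config")
	default:
		fmt.Println("no transition: nothing to do for this namespace")
	}
}
```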
} else if reflect.DeepEqual(newNamespaceSelector, oldNamespaceSelector) && !reflect.DeepEqual(newPodSelector, oldPodSelector) { - namespaces, err := oc.watchFactory.GetNamespacesBySelector(newEIP.Spec.NamespaceSelector) + namespaces, err := e.watchFactory.GetNamespacesBySelector(newEIP.Spec.NamespaceSelector) if err != nil { return err } for _, namespace := range namespaces { - pods, err := oc.watchFactory.GetPods(namespace.Name) + pods, err := e.watchFactory.GetPods(namespace.Name) if err != nil { return err } for _, pod := range pods { podLabels := labels.Set(pod.Labels) if !newPodSelector.Matches(podLabels) && oldPodSelector.Matches(podLabels) { - if err := oc.deletePodEgressIPAssignments(oldEIP.Name, oldEIP.Status.Items, pod); err != nil { - return err + ni, err := e.nadController.GetActiveNetworkForNamespace(namespace.Name) + if err != nil { + return fmt.Errorf("failed to get active network for namespace %s: %v", namespace.Name, err) + } + if err := e.deletePodEgressIPAssignmentsWithCleanup(ni, oldEIP.Name, oldEIP.Status.Items, pod); err != nil { + return fmt.Errorf("network %s: failed to delete pod %s/%s egress IP config: %v", ni.GetNetworkName(), pod.Namespace, pod.Name, err) } } if util.PodCompleted(pod) { continue } if newPodSelector.Matches(podLabels) && !oldPodSelector.Matches(podLabels) { - if err := oc.addPodEgressIPAssignmentsWithLock(newEIP.Name, newEIP.Status.Items, pod); err != nil { - return err + ni, err := e.nadController.GetActiveNetworkForNamespace(namespace.Name) + if err != nil { + return fmt.Errorf("failed to get active network for namespace %s: %v", namespace.Name, err) + } + if err := e.addPodEgressIPAssignmentsWithLock(ni, newEIP.Name, newEIP.Status.Items, mark, pod); err != nil { + return fmt.Errorf("network %s: failed to add pod %s/%s egress IP config: %v", ni.GetNetworkName(), pod.Namespace, pod.Name, err) } } } @@ -238,7 +402,7 @@ func (oc *DefaultNetworkController) reconcileEgressIP(old, new *egressipv1.Egres // old ones and not matching the new ones, and add setup for all // matching the new ones but which didn't match the old ones. } else if !reflect.DeepEqual(newNamespaceSelector, oldNamespaceSelector) && !reflect.DeepEqual(newPodSelector, oldPodSelector) { - namespaces, err := oc.watchFactory.GetNamespaces() + namespaces, err := e.watchFactory.GetNamespaces() if err != nil { return err } @@ -246,9 +410,13 @@ func (oc *DefaultNetworkController) reconcileEgressIP(old, new *egressipv1.Egres namespaceLabels := labels.Set(namespace.Labels) // If the namespace does not match anymore then there's no // reason to look at the pod selector. 
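For orientation, the update path above picks one of four strategies depending on which selectors changed; a compact restatement (illustrative helper, not patch code):

```go
// pickReconcileStrategy mirrors the branch selection above; nsChanged and
// podChanged stand for the two reflect.DeepEqual tests on the selectors.
func pickReconcileStrategy(nsChanged, podChanged bool) string {
	switch {
	case nsChanged && !podChanged:
		return "walk all namespaces; add/remove whole-namespace assignments"
	case !nsChanged && podChanged:
		return "walk pods of the selected namespaces; add/remove per matching pod"
	case nsChanged && podChanged:
		return "walk all namespaces and their pods; handle stop/start/keep matching"
	default:
		return "selectors unchanged; only status changes needed handling"
	}
}
```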
+ ni, err := e.nadController.GetActiveNetworkForNamespace(namespace.Name) + if err != nil { + return fmt.Errorf("failed to get active network for namespace %s: %v", namespace.Name, err) + } if !newNamespaceSelector.Matches(namespaceLabels) && oldNamespaceSelector.Matches(namespaceLabels) { - if err := oc.deleteNamespaceEgressIPAssignment(oldEIP.Name, oldEIP.Status.Items, namespace, oldEIP.Spec.PodSelector); err != nil { - return err + if err := e.deleteNamespaceEgressIPAssignment(ni, oldEIP.Name, oldEIP.Status.Items, namespace, oldEIP.Spec.PodSelector); err != nil { + return fmt.Errorf("network %s: failed to delete namespace %s egress IP config: %v", ni.GetNetworkName(), namespace.Name, err) } } // If the namespace starts matching, look at the pods selector @@ -256,15 +424,15 @@ func (oc *DefaultNetworkController) reconcileEgressIP(old, new *egressipv1.Egres // which match the new pod selector or if the podSelector is empty // then just perform the setup. if newNamespaceSelector.Matches(namespaceLabels) && !oldNamespaceSelector.Matches(namespaceLabels) { - pods, err := oc.watchFactory.GetPods(namespace.Name) + pods, err := e.watchFactory.GetPods(namespace.Name) if err != nil { return err } for _, pod := range pods { podLabels := labels.Set(pod.Labels) if newPodSelector.Matches(podLabels) { - if err := oc.addPodEgressIPAssignmentsWithLock(newEIP.Name, newEIP.Status.Items, pod); err != nil { - return err + if err := e.addPodEgressIPAssignmentsWithLock(ni, newEIP.Name, newEIP.Status.Items, mark, pod); err != nil { + return fmt.Errorf("network %s: failed to add pod %s/%s egress IP config: %v", ni.GetNetworkName(), pod.Namespace, pod.Name, err) } } } @@ -272,23 +440,23 @@ func (oc *DefaultNetworkController) reconcileEgressIP(old, new *egressipv1.Egres // If the namespace continues to match, look at the pods // selector and pods in that namespace. if newNamespaceSelector.Matches(namespaceLabels) && oldNamespaceSelector.Matches(namespaceLabels) { - pods, err := oc.watchFactory.GetPods(namespace.Name) + pods, err := e.watchFactory.GetPods(namespace.Name) if err != nil { return err } for _, pod := range pods { podLabels := labels.Set(pod.Labels) if !newPodSelector.Matches(podLabels) && oldPodSelector.Matches(podLabels) { - if err := oc.deletePodEgressIPAssignments(oldEIP.Name, oldEIP.Status.Items, pod); err != nil { - return err + if err := e.deletePodEgressIPAssignmentsWithCleanup(ni, oldEIP.Name, oldEIP.Status.Items, pod); err != nil { + return fmt.Errorf("network %s: failed to delete pod %s/%s egress IP config: %v", ni.GetNetworkName(), pod.Namespace, pod.Name, err) } } if util.PodCompleted(pod) { continue } if newPodSelector.Matches(podLabels) && !oldPodSelector.Matches(podLabels) { - if err := oc.addPodEgressIPAssignmentsWithLock(newEIP.Name, newEIP.Status.Items, pod); err != nil { - return err + if err := e.addPodEgressIPAssignmentsWithLock(ni, newEIP.Name, newEIP.Status.Items, mark, pod); err != nil { + return fmt.Errorf("network %s: failed to add pod %s/%s egress IP config: %v", ni.GetNetworkName(), pod.Namespace, pod.Name, err) } } } @@ -302,15 +470,18 @@ func (oc *DefaultNetworkController) reconcileEgressIP(old, new *egressipv1.Egres // reconcileEgressIPNamespace reconciles the database configuration setup in nbdb // based on received namespace objects. 
// NOTE: we only care about namespace label updates -func (oc *DefaultNetworkController) reconcileEgressIPNamespace(old, new *v1.Namespace) error { +func (e *EgressIPController) reconcileEgressIPNamespace(old, new *corev1.Namespace) error { // Same as for reconcileEgressIP: labels play nicely with empty object, not // nil ones. - oldNamespace, newNamespace := &v1.Namespace{}, &v1.Namespace{} + var namespaceName string + oldNamespace, newNamespace := &corev1.Namespace{}, &corev1.Namespace{} if old != nil { oldNamespace = old + namespaceName = old.Name } if new != nil { newNamespace = new + namespaceName = new.Name } // If the labels have not changed, then there's no change that we care @@ -327,7 +498,7 @@ func (oc *DefaultNetworkController) reconcileEgressIPNamespace(old, new *v1.Name // all "blue" pods in namespace A, and a second EgressIP object match all // "red" pods in namespace A), so continue iterating all EgressIP objects // before finishing. - egressIPs, err := oc.watchFactory.GetEgressIPs() + egressIPs, err := e.watchFactory.GetEgressIPs() if err != nil { return err } @@ -337,13 +508,22 @@ func (oc *DefaultNetworkController) reconcileEgressIPNamespace(old, new *v1.Name return err } if namespaceSelector.Matches(oldLabels) && !namespaceSelector.Matches(newLabels) { - if err := oc.deleteNamespaceEgressIPAssignment(egressIP.Name, egressIP.Status.Items, oldNamespace, egressIP.Spec.PodSelector); err != nil { - return err + ni, err := e.nadController.GetActiveNetworkForNamespace(namespaceName) + if err != nil { + return fmt.Errorf("failed to get active network for namespace %s: %v", namespaceName, err) + } + if err := e.deleteNamespaceEgressIPAssignment(ni, egressIP.Name, egressIP.Status.Items, oldNamespace, egressIP.Spec.PodSelector); err != nil { + return fmt.Errorf("network %s: failed to delete namespace %s egress IP config: %v", ni.GetNetworkName(), namespaceName, err) } } if !namespaceSelector.Matches(oldLabels) && namespaceSelector.Matches(newLabels) { - if err := oc.addNamespaceEgressIPAssignments(egressIP.Name, egressIP.Status.Items, newNamespace, egressIP.Spec.PodSelector); err != nil { - return err + mark := getEgressIPPktMark(egressIP.Name, egressIP.Annotations) + ni, err := e.nadController.GetActiveNetworkForNamespace(namespaceName) + if err != nil { + return fmt.Errorf("failed to get active network for namespace %s: %v", namespaceName, err) + } + if err := e.addNamespaceEgressIPAssignments(ni, egressIP.Name, egressIP.Status.Items, mark, newNamespace, egressIP.Spec.PodSelector); err != nil { + return fmt.Errorf("network %s: failed to add namespace %s egress IP config: %v", ni.GetNetworkName(), namespaceName, err) } } } @@ -353,12 +533,12 @@ func (oc *DefaultNetworkController) reconcileEgressIPNamespace(old, new *v1.Name // reconcileEgressIPPod reconciles the database configuration setup in nbdb // based on received pod objects. 
// NOTE: we only care about pod label updates -func (oc *DefaultNetworkController) reconcileEgressIPPod(old, new *v1.Pod) (err error) { - oldPod, newPod := &v1.Pod{}, &v1.Pod{} - namespace := &v1.Namespace{} +func (e *EgressIPController) reconcileEgressIPPod(old, new *corev1.Pod) (err error) { + oldPod, newPod := &corev1.Pod{}, &corev1.Pod{} + namespace := &corev1.Namespace{} if old != nil { oldPod = old - namespace, err = oc.watchFactory.GetNamespace(oldPod.Namespace) + namespace, err = e.watchFactory.GetNamespace(oldPod.Namespace) if err != nil { // when the whole namespace gets removed, we can ignore the NotFound error here // any potential configuration will get removed in reconcileEgressIPNamespace @@ -371,7 +551,7 @@ func (oc *DefaultNetworkController) reconcileEgressIPPod(old, new *v1.Pod) (err } if new != nil { newPod = new - namespace, err = oc.watchFactory.GetNamespace(newPod.Namespace) + namespace, err = e.watchFactory.GetNamespace(newPod.Namespace) if err != nil { return err } @@ -397,7 +577,7 @@ func (oc *DefaultNetworkController) reconcileEgressIPPod(old, new *v1.Pod) (err // gets changed to a blue label: we need add and remove the set up for both // EgressIP obejcts - since we can't be sure of which EgressIP object we // process first, always iterate all. - egressIPs, err := oc.watchFactory.GetEgressIPs() + egressIPs, err := e.watchFactory.GetEgressIPs() if err != nil { return err } @@ -413,10 +593,15 @@ func (oc *DefaultNetworkController) reconcileEgressIPPod(old, new *v1.Pod) (err // match only a subset of pods in the namespace, and we'll have to // check that. If there is no podSelector: the user intends it to // match all pods in the namespace. + mark := getEgressIPPktMark(egressIP.Name, egressIP.Annotations) podSelector, err := metav1.LabelSelectorAsSelector(&egressIP.Spec.PodSelector) if err != nil { return err } + ni, err := e.nadController.GetActiveNetworkForNamespace(namespace.Name) + if err != nil { + return fmt.Errorf("failed to get active network for namespace %s: %v", namespace.Name, err) + } if !podSelector.Empty() { // Use "new" and "old" instead of "newPod" and "oldPod" to determine whether // pods was created or is being deleted. @@ -432,8 +617,8 @@ func (oc *DefaultNetworkController) reconcileEgressIPPod(old, new *v1.Pod) (err // Check if the pod stopped matching. If the pod was deleted, // "new" will be nil, so this must account for that case. if !newMatches && oldMatches { - if err := oc.deletePodEgressIPAssignments(egressIP.Name, egressIP.Status.Items, oldPod); err != nil { - return err + if err := e.deletePodEgressIPAssignmentsWithCleanup(ni, egressIP.Name, egressIP.Status.Items, oldPod); err != nil { + return fmt.Errorf("network %s: failed to delete pod %s/%s egress IP config: %v", ni.GetNetworkName(), oldPod.Namespace, oldPod.Name, err) } continue } @@ -444,8 +629,8 @@ func (oc *DefaultNetworkController) reconcileEgressIPPod(old, new *v1.Pod) (err // IPs assigned at that point and we need to continue trying the // pod setup for every pod update as to make sure we process the // pod IP assignment. 
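The retry rationale in the comment above can be stated as a predicate; readyForEgressIPSetup below is a hypothetical helper (not in the patch) capturing when a pod event can actually be processed:

```go
import corev1 "k8s.io/api/core/v1"

// readyForEgressIPSetup: an add/update event that arrives before the pod is
// scheduled, or before CNI has assigned its IPs, is not an error; the setup is
// simply retried on a later update event.
func readyForEgressIPSetup(pod *corev1.Pod) bool {
	return pod.Spec.NodeName != "" && len(pod.Status.PodIPs) > 0
}
```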
- if err := oc.addPodEgressIPAssignmentsWithLock(egressIP.Name, egressIP.Status.Items, newPod); err != nil { - return err + if err := e.addPodEgressIPAssignmentsWithLock(ni, egressIP.Name, egressIP.Status.Items, mark, newPod); err != nil { + return fmt.Errorf("network %s: failed to add pod %s/%s egress IP config: %v", ni.GetNetworkName(), newPod.Namespace, newPod.Name, err) } continue } @@ -453,14 +638,14 @@ func (oc *DefaultNetworkController) reconcileEgressIPPod(old, new *v1.Pod) (err // to match all pods in the namespace) and the pod has been deleted: // "new" will be nil and we need to remove the setup if new == nil { - if err := oc.deletePodEgressIPAssignments(egressIP.Name, egressIP.Status.Items, oldPod); err != nil { - return err + if err := e.deletePodEgressIPAssignmentsWithCleanup(ni, egressIP.Name, egressIP.Status.Items, oldPod); err != nil { + return fmt.Errorf("network %s: failed to delete pod %s/%s egress IP config: %v", ni.GetNetworkName(), oldPod.Namespace, oldPod.Name, err) } continue } // For all else, perform a setup for the pod - if err := oc.addPodEgressIPAssignmentsWithLock(egressIP.Name, egressIP.Status.Items, newPod); err != nil { - return err + if err := e.addPodEgressIPAssignmentsWithLock(ni, egressIP.Name, egressIP.Status.Items, mark, newPod); err != nil { + return fmt.Errorf("network %s: failed to add pod %s/%s egress IP config: %v", ni.GetNetworkName(), newPod.Namespace, newPod.Name, err) } } } @@ -469,20 +654,25 @@ func (oc *DefaultNetworkController) reconcileEgressIPPod(old, new *v1.Pod) (err // main reconcile functions end here and local zone controller functions begin -func (oc *DefaultNetworkController) addEgressIPAssignments(name string, statusAssignments []egressipv1.EgressIPStatusItem, namespaceSelector, podSelector metav1.LabelSelector) error { - namespaces, err := oc.watchFactory.GetNamespacesBySelector(namespaceSelector) +func (e *EgressIPController) addEgressIPAssignments(name string, statusAssignments []egressipv1.EgressIPStatusItem, mark util.EgressIPMark, namespaceSelector, podSelector metav1.LabelSelector) error { + namespaces, err := e.watchFactory.GetNamespacesBySelector(namespaceSelector) if err != nil { return err } for _, namespace := range namespaces { - if err := oc.addNamespaceEgressIPAssignments(name, statusAssignments, namespace, podSelector); err != nil { + ni, err := e.nadController.GetActiveNetworkForNamespace(namespace.Name) + if err != nil { + return fmt.Errorf("failed to get active network for namespace %s: %v", namespace.Name, err) + } + if err := e.addNamespaceEgressIPAssignments(ni, name, statusAssignments, mark, namespace, podSelector); err != nil { return err } } return nil } -func (oc *DefaultNetworkController) addNamespaceEgressIPAssignments(name string, statusAssignments []egressipv1.EgressIPStatusItem, namespace *kapi.Namespace, podSelector metav1.LabelSelector) error { +func (e *EgressIPController) addNamespaceEgressIPAssignments(ni util.NetInfo, name string, statusAssignments []egressipv1.EgressIPStatusItem, mark util.EgressIPMark, + namespace *kapi.Namespace, podSelector metav1.LabelSelector) error { var pods []*kapi.Pod var err error selector, err := metav1.LabelSelectorAsSelector(&podSelector) @@ -490,35 +680,36 @@ func (oc *DefaultNetworkController) addNamespaceEgressIPAssignments(name string, return err } if !selector.Empty() { - pods, err = oc.watchFactory.GetPodsBySelector(namespace.Name, podSelector) + pods, err = e.watchFactory.GetPodsBySelector(namespace.Name, podSelector) if err != nil { return err } } else { 
- pods, err = oc.watchFactory.GetPods(namespace.Name) + pods, err = e.watchFactory.GetPods(namespace.Name) if err != nil { return err } } for _, pod := range pods { - if err := oc.addPodEgressIPAssignmentsWithLock(name, statusAssignments, pod); err != nil { + if err := e.addPodEgressIPAssignmentsWithLock(ni, name, statusAssignments, mark, pod); err != nil { return err } } return nil } -func (oc *DefaultNetworkController) addPodEgressIPAssignmentsWithLock(name string, statusAssignments []egressipv1.EgressIPStatusItem, pod *kapi.Pod) error { - oc.eIPC.podAssignmentMutex.Lock() - defer oc.eIPC.podAssignmentMutex.Unlock() - return oc.addPodEgressIPAssignments(name, statusAssignments, pod) +func (e *EgressIPController) addPodEgressIPAssignmentsWithLock(ni util.NetInfo, name string, statusAssignments []egressipv1.EgressIPStatusItem, mark util.EgressIPMark, pod *kapi.Pod) error { + e.deletePreviousNetworkPodEgressIPAssignments(ni, name, statusAssignments, pod) + e.podAssignmentMutex.Lock() + defer e.podAssignmentMutex.Unlock() + return e.addPodEgressIPAssignments(ni, name, statusAssignments, mark, pod) } // addPodEgressIPAssignments tracks the setup made for each egress IP matching // pod w.r.t to each status. This is mainly done to avoid a lot of duplicated // work on ovnkube-master restarts when all egress IP handlers will most likely // match and perform the setup for the same pod and status multiple times over. -func (oc *DefaultNetworkController) addPodEgressIPAssignments(name string, statusAssignments []egressipv1.EgressIPStatusItem, pod *kapi.Pod) error { +func (e *EgressIPController) addPodEgressIPAssignments(ni util.NetInfo, name string, statusAssignments []egressipv1.EgressIPStatusItem, mark util.EgressIPMark, pod *kapi.Pod) error { podKey := getPodKey(pod) // If pod is already in succeeded or failed state, return it without proceeding further. if util.PodCompleted(pod) { @@ -537,58 +728,49 @@ func (oc *DefaultNetworkController) addPodEgressIPAssignments(name string, statu // 2) the pod being added is local to this zone proceed := false for _, status := range statusAssignments { - oc.eIPC.nodeZoneState.LockKey(status.Node) - isLocalZoneEgressNode, loadedEgressNode := oc.eIPC.nodeZoneState.Load(status.Node) + e.nodeZoneState.LockKey(status.Node) + isLocalZoneEgressNode, loadedEgressNode := e.nodeZoneState.Load(status.Node) if loadedEgressNode && isLocalZoneEgressNode { proceed = true - oc.eIPC.nodeZoneState.UnlockKey(status.Node) + e.nodeZoneState.UnlockKey(status.Node) break } - oc.eIPC.nodeZoneState.UnlockKey(status.Node) + e.nodeZoneState.UnlockKey(status.Node) } - if !proceed && !oc.isPodScheduledinLocalZone(pod) { + if !proceed && !e.isPodScheduledinLocalZone(pod) { return nil // nothing to do if none of the status nodes are local to this master and pod is also remote } var remainingAssignments []egressipv1.EgressIPStatusItem - var podIPs []*net.IPNet - var err error - if oc.isPodScheduledinLocalZone(pod) { - // Retrieve the pod's networking configuration from the - // logicalPortCache. The reason for doing this: a) only normal network - // pods are placed in this cache, b) once the pod is placed here we know - // addLogicalPort has finished successfully setting up networking for - // the pod, so we can proceed with retrieving its IP and deleting the - // external GW configuration created in addLogicalPort for the pod. 
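The removed code just below fetched pod networking from the logicalPortCache; the replacement path (visible further down) instead takes the pod's addresses as *net.IPNet values and keeps only the IP part. A minimal equivalent of that extraction, assuming full-mask pod CIDRs:

```go
package main

import (
	"fmt"
	"net"
)

// ipsFromIPNets mirrors the loop the new code runs over podIPNets.
func ipsFromIPNets(ipNets []*net.IPNet) []net.IP {
	ips := make([]net.IP, 0, len(ipNets))
	for _, ipNet := range ipNets {
		ips = append(ips, ipNet.IP) // drop the mask, keep the address
	}
	return ips
}

func main() {
	_, podNet, _ := net.ParseCIDR("10.244.1.5/32") // a full-mask pod CIDR
	fmt.Println(ipsFromIPNets([]*net.IPNet{podNet})) // [10.244.1.5]
}
```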
- logicalPort, err := oc.logicalPortCache.get(pod, types.DefaultNetworkName) - if err != nil { - return nil - } - // Since the logical switch port cache removes entries only 60 seconds - // after deletion, its possible that when pod is recreated with the same name - // within the 60seconds timer, stale info gets used to create SNATs and reroutes - // for the eip pods. Checking if the expiry is set for the port or not can indicate - // if the port is scheduled for deletion. - if !logicalPort.expires.IsZero() { - klog.Warningf("Stale LSP %s for pod %s found in cache refetching", - logicalPort.name, podKey) - return nil - } - podIPs = logicalPort.ips - } else { // means this is egress node's local master - podIPs, err = util.GetPodCIDRsWithFullMask(pod, oc.NetInfo) - if err != nil { - return err + nadName := ni.GetNetworkName() + if ni.IsSecondary() { + nadNames := ni.GetNADs() + if len(nadNames) == 0 { + return fmt.Errorf("expected at least one NAD name for Namespace %s", pod.Namespace) } + nadName = nadNames[0] // there should only be one active network + } + podIPNets, err := e.getPodIPs(ni, pod, nadName) + if err != nil { + return fmt.Errorf("failed to get pod %s/%s IPs: %v", pod.Namespace, pod.Name, err) } - podState, exists := oc.eIPC.podAssignment[podKey] + if len(podIPNets) == 0 { + return fmt.Errorf("failed to get pod ips for pod %s on network %s with NAD name %s", podKey, ni.GetNetworkName(), nadName) + } + podIPs := make([]net.IP, 0, len(podIPNets)) + for _, ipNet := range podIPNets { + podIPs = append(podIPs, ipNet.IP) + } + podState, exists := e.podAssignment[podKey] if !exists { remainingAssignments = statusAssignments podState = &podAssignmentState{ egressIPName: name, egressStatuses: egressStatuses{make(map[egressipv1.EgressIPStatusItem]string)}, standbyEgressIPNames: sets.New[string](), + podIPs: podIPs, + network: ni, } - oc.eIPC.podAssignment[podKey] = podState + e.podAssignment[podKey] = podState } else if podState.egressIPName == name || podState.egressIPName == "" { // We do the setup only if this egressIP object is the one serving this pod OR // podState.egressIPName can be empty if no re-routes were found in @@ -598,7 +780,9 @@ func (oc *DefaultNetworkController) addPodEgressIPAssignments(name string, statu remainingAssignments = append(remainingAssignments, status) } } + podState.podIPs = podIPs podState.egressIPName = name + podState.network = ni podState.standbyEgressIPNames.Delete(name) } else if podState.egressIPName != name { klog.Warningf("EgressIP object %s will not be configured for pod %s "+ @@ -607,7 +791,7 @@ func (oc *DefaultNetworkController) addPodEgressIPAssignments(name string, statu Kind: "EgressIP", Name: name, } - oc.recorder.Eventf( + e.recorder.Eventf( &eIPRef, kapi.EventTypeWarning, "UndefinedRequest", @@ -617,20 +801,20 @@ func (oc *DefaultNetworkController) addPodEgressIPAssignments(name string, statu return nil } for _, status := range remainingAssignments { - klog.V(2).Infof("Adding pod egress IP status: %v for EgressIP: %s and pod: %s/%s/%v", status, name, pod.Namespace, pod.Name, podIPs) - err = oc.eIPC.nodeZoneState.DoWithLock(status.Node, func(key string) error { + klog.V(2).Infof("Adding pod egress IP status: %v for EgressIP: %s and pod: %s/%s/%v", status, name, pod.Namespace, pod.Name, podIPNets) + err = e.nodeZoneState.DoWithLock(status.Node, func(key string) error { if status.Node == pod.Spec.NodeName { // we are safe, no need to grab lock again - if err := oc.eIPC.addPodEgressIPAssignment(name, status, pod, podIPs); err != nil { - 
return fmt.Errorf("unable to create egressip configuration for pod %s/%s/%v, err: %w", pod.Namespace, pod.Name, podIPs, err) + if err := e.addPodEgressIPAssignment(ni, name, status, mark, pod, podIPNets); err != nil { + return fmt.Errorf("unable to create egressip configuration for pod %s/%s/%v, err: %w", pod.Namespace, pod.Name, podIPNets, err) } podState.egressStatuses.statusMap[status] = "" return nil } - return oc.eIPC.nodeZoneState.DoWithLock(pod.Spec.NodeName, func(key string) error { + return e.nodeZoneState.DoWithLock(pod.Spec.NodeName, func(key string) error { // we need to grab lock again for pod's node - if err := oc.eIPC.addPodEgressIPAssignment(name, status, pod, podIPs); err != nil { - return fmt.Errorf("unable to create egressip configuration for pod %s/%s/%v, err: %w", pod.Namespace, pod.Name, podIPs, err) + if err := e.addPodEgressIPAssignment(ni, name, status, mark, pod, podIPNets); err != nil { + return fmt.Errorf("unable to create egressip configuration for pod %s/%s/%v, err: %w", pod.Namespace, pod.Name, podIPNets, err) } podState.egressStatuses.statusMap[status] = "" return nil @@ -640,14 +824,8 @@ func (oc *DefaultNetworkController) addPodEgressIPAssignments(name string, statu return err } } - if oc.isPodScheduledinLocalZone(pod) { - // add the podIP to the global egressIP address set - addrSetIPs := make([]net.IP, len(podIPs)) - for i, podIP := range podIPs { - copyPodIP := *podIP - addrSetIPs[i] = copyPodIP.IP - } - if err := oc.addPodIPsToAddressSet(addrSetIPs); err != nil { + if e.isPodScheduledinLocalZone(pod) { + if err := e.addPodIPsToAddressSet(ni.GetNetworkName(), e.controllerName, podIPs...); err != nil { return fmt.Errorf("cannot add egressPodIPs for the pod %s/%s to the address set: err: %v", pod.Namespace, pod.Name, err) } } @@ -659,14 +837,13 @@ func (oc *DefaultNetworkController) addPodEgressIPAssignments(name string, statu // the NB DB for that egress IP object and delete everything which match the // status. We also need to update the podAssignment cache and finally re-add the // external GW setup in case the pod still exists. 
-func (oc *DefaultNetworkController) deleteEgressIPAssignments(name string, statusesToRemove []egressipv1.EgressIPStatusItem) error { - oc.eIPC.podAssignmentMutex.Lock() - defer oc.eIPC.podAssignmentMutex.Unlock() - var podIPs []net.IP - var err error +func (e *EgressIPController) deleteEgressIPAssignments(name string, statusesToRemove []egressipv1.EgressIPStatusItem) error { + e.podAssignmentMutex.Lock() + defer e.podAssignmentMutex.Unlock() + for _, statusToRemove := range statusesToRemove { - removed := false - for podKey, podStatus := range oc.eIPC.podAssignment { + processedNetworks := make(map[string]struct{}) + for podKey, podStatus := range e.podAssignment { if podStatus.egressIPName != name { // we can continue here since this pod was not managed by this EIP object podStatus.standbyEgressIPNames.Delete(name) @@ -676,18 +853,28 @@ func (oc *DefaultNetworkController) deleteEgressIPAssignments(name string, statu // we can continue here since this pod was not managed by this statusToRemove continue } - err = oc.eIPC.nodeZoneState.DoWithLock(statusToRemove.Node, func(key string) error { + podNamespace, podName := getPodNamespaceAndNameFromKey(podKey) + ni, err := e.nadController.GetActiveNetworkForNamespace(podNamespace) + if err != nil { + return fmt.Errorf("failed to get active network for namespace %s", podNamespace) + } + cachedNetwork := e.getNetworkFromPodAssignment(podKey) + err = e.nodeZoneState.DoWithLock(statusToRemove.Node, func(key string) error { // this statusToRemove was managing at least one pod, hence let's tear down the setup for this status - if !removed { + if _, ok := processedNetworks[ni.GetNetworkName()]; !ok { klog.V(2).Infof("Deleting pod egress IP status: %v for EgressIP: %s", statusToRemove, name) - if podIPs, err = oc.eIPC.deleteEgressIPStatusSetup(name, statusToRemove); err != nil { - return err + if err := e.deleteEgressIPStatusSetup(ni, name, statusToRemove); err != nil { + return fmt.Errorf("failed to delete EgressIP %s status setup for network %s: %v", name, ni.GetNetworkName(), err) + } + if cachedNetwork != nil && !cachedNetwork.Equals(ni) { + if err := e.deleteEgressIPStatusSetup(cachedNetwork, name, statusToRemove); err != nil { + klog.Errorf("Failed to delete EgressIP %s status setup for network %s: %v", name, cachedNetwork.GetNetworkName(), err) + } } - removed = true // we should only tear down once and not per pod since tear down is based on externalIDs } + processedNetworks[ni.GetNetworkName()] = struct{}{} // this pod was managed by statusToRemove.EgressIP; we need to try and add its SNAT back towards nodeIP - podNamespace, podName := getPodNamespaceAndNameFromKey(podKey) - if err = oc.eIPC.addExternalGWPodSNAT(podNamespace, podName, statusToRemove); err != nil { + if err := e.addExternalGWPodSNAT(ni, podNamespace, podName, statusToRemove); err != nil { return err } podStatus.egressStatuses.delete(statusToRemove) @@ -704,14 +891,14 @@ func (oc *DefaultNetworkController) deleteEgressIPAssignments(name string, statu // delete the podIP from the global egressIP address set since its no longer managed by egressIPs // NOTE(tssurya): There is no way to infer if pod was local to this zone or not, // so we try to nuke the IP from address-set anyways - it will be a no-op for remote pods - if err := oc.deletePodIPsFromAddressSet(podIPs); err != nil { + if err := e.deletePodIPsFromAddressSet(ni.GetNetworkName(), e.controllerName, podStatus.podIPs...); err != nil { return fmt.Errorf("cannot delete egressPodIPs for the pod %s from the address set: err: 
%v", podKey, err) } - delete(oc.eIPC.podAssignment, podKey) + delete(e.podAssignment, podKey) } else if len(podStatus.egressStatuses.statusMap) == 0 && len(podStatus.standbyEgressIPNames) > 0 { klog.V(2).Infof("Pod %s has standby egress IP %+v", podKey, podStatus.standbyEgressIPNames.UnsortedList()) podStatus.egressIPName = "" // we have deleted the current egressIP that was managing the pod - if err := oc.addStandByEgressIPAssignment(podKey, podStatus); err != nil { + if err := e.addStandByEgressIPAssignment(ni, podKey, podStatus); err != nil { klog.Errorf("Adding standby egressIPs for pod %s with status %v failed: %v", podKey, podStatus, err) // We are not returning the error on purpose, this will be best effort without any retries because // retrying deleteEgressIPAssignments for original EIP because addStandByEgressIPAssignment failed is useless. @@ -726,7 +913,7 @@ func (oc *DefaultNetworkController) deleteEgressIPAssignments(name string, statu return nil } -func (oc *DefaultNetworkController) deleteNamespaceEgressIPAssignment(name string, statusAssignments []egressipv1.EgressIPStatusItem, namespace *kapi.Namespace, podSelector metav1.LabelSelector) error { +func (e *EgressIPController) deleteNamespaceEgressIPAssignment(ni util.NetInfo, name string, statusAssignments []egressipv1.EgressIPStatusItem, namespace *kapi.Namespace, podSelector metav1.LabelSelector) error { var pods []*kapi.Pod var err error selector, err := metav1.LabelSelectorAsSelector(&podSelector) @@ -734,29 +921,35 @@ func (oc *DefaultNetworkController) deleteNamespaceEgressIPAssignment(name strin return err } if !selector.Empty() { - pods, err = oc.watchFactory.GetPodsBySelector(namespace.Name, podSelector) + pods, err = e.watchFactory.GetPodsBySelector(namespace.Name, podSelector) if err != nil { return err } } else { - pods, err = oc.watchFactory.GetPods(namespace.Name) + pods, err = e.watchFactory.GetPods(namespace.Name) if err != nil { return err } } for _, pod := range pods { - if err := oc.deletePodEgressIPAssignments(name, statusAssignments, pod); err != nil { - return err + if err := e.deletePodEgressIPAssignmentsWithCleanup(ni, name, statusAssignments, pod); err != nil { + return fmt.Errorf("failed to delete EgressIP %s assignment for pod %s/%s attached to network %s: %v", + name, pod.Namespace, pod.Name, ni.GetNetworkName(), err) } } return nil } -func (oc *DefaultNetworkController) deletePodEgressIPAssignments(name string, statusesToRemove []egressipv1.EgressIPStatusItem, pod *kapi.Pod) error { - oc.eIPC.podAssignmentMutex.Lock() - defer oc.eIPC.podAssignmentMutex.Unlock() +func (e *EgressIPController) deletePodEgressIPAssignmentsWithCleanup(ni util.NetInfo, name string, statusesToRemove []egressipv1.EgressIPStatusItem, pod *kapi.Pod) error { + e.deletePreviousNetworkPodEgressIPAssignments(ni, name, statusesToRemove, pod) + return e.deletePodEgressIPAssignments(ni, name, statusesToRemove, pod) +} + +func (e *EgressIPController) deletePodEgressIPAssignments(ni util.NetInfo, name string, statusesToRemove []egressipv1.EgressIPStatusItem, pod *kapi.Pod) error { + e.podAssignmentMutex.Lock() + defer e.podAssignmentMutex.Unlock() podKey := getPodKey(pod) - podStatus, exists := oc.eIPC.podAssignment[podKey] + podStatus, exists := e.podAssignment[podKey] if !exists { return nil } else if podStatus.egressIPName != name { @@ -764,30 +957,23 @@ func (oc *DefaultNetworkController) deletePodEgressIPAssignments(name string, st podStatus.standbyEgressIPNames.Delete(name) return nil } - podIPs, err := 
util.GetPodCIDRsWithFullMask(pod, oc.NetInfo) - // FIXME(trozet): this error can be ignored if ErrNoPodIPFound, but unit test: - // egressIP pod recreate with same name (stateful-sets) shouldn't use stale logicalPortCache entries AND stale podAssignment cache entries - // heavily relies on this error happening. - if err != nil { - return err - } for _, statusToRemove := range statusesToRemove { if ok := podStatus.egressStatuses.contains(statusToRemove); !ok { // we can continue here since this pod was not managed by this statusToRemove continue } klog.V(2).Infof("Deleting pod egress IP status: %v for EgressIP: %s and pod: %s/%s", statusToRemove, name, pod.Name, pod.Namespace) - err = oc.eIPC.nodeZoneState.DoWithLock(statusToRemove.Node, func(key string) error { + err := e.nodeZoneState.DoWithLock(statusToRemove.Node, func(key string) error { if statusToRemove.Node == pod.Spec.NodeName { // we are safe, no need to grab lock again - if err := oc.eIPC.deletePodEgressIPAssignment(name, statusToRemove, pod, podIPs); err != nil { + if err := e.deletePodEgressIPAssignment(ni, name, statusToRemove, pod); err != nil { return err } podStatus.egressStatuses.delete(statusToRemove) return nil } - return oc.eIPC.nodeZoneState.DoWithLock(pod.Spec.NodeName, func(key string) error { - if err := oc.eIPC.deletePodEgressIPAssignment(name, statusToRemove, pod, podIPs); err != nil { + return e.nodeZoneState.DoWithLock(pod.Spec.NodeName, func(key string) error { + if err := e.deletePodEgressIPAssignment(ni, name, statusToRemove, pod); err != nil { return err } podStatus.egressStatuses.delete(statusToRemove) @@ -805,23 +991,96 @@ func (oc *DefaultNetworkController) deletePodEgressIPAssignments(name string, st // so remove the podKey from cache only if we are sure // there are no more egressStatuses managing this pod klog.V(5).Infof("Deleting pod key %s from assignment cache", podKey) - if oc.isPodScheduledinLocalZone(pod) { - // delete the podIP from the global egressIP address set - addrSetIPs := make([]net.IP, len(podIPs)) - for i, podIP := range podIPs { - copyPodIP := *podIP - addrSetIPs[i] = copyPodIP.IP - } - if err := oc.deletePodIPsFromAddressSet(addrSetIPs); err != nil { + if e.isPodScheduledinLocalZone(pod) { + if err := e.deletePodIPsFromAddressSet(ni.GetNetworkName(), e.controllerName, podStatus.podIPs...); err != nil { return fmt.Errorf("cannot delete egressPodIPs for the pod %s from the address set: err: %v", podKey, err) } } - delete(oc.eIPC.podAssignment, podKey) + delete(e.podAssignment, podKey) } return nil } -type egressIPCacheEntry struct { +// deletePreviousNetworkPodEgressIPAssignments checks if the network changed and removes any stale config from the previous network. +func (e *EgressIPController) deletePreviousNetworkPodEgressIPAssignments(ni util.NetInfo, name string, statusesToRemove []egressipv1.EgressIPStatusItem, pod *corev1.Pod) { + cachedNetwork := e.getNetworkFromPodAssignmentWithLock(getPodKey(pod)) + if cachedNetwork != nil { + if !cachedNetwork.Equals(ni) { + if err := e.deletePodEgressIPAssignments(cachedNetwork, name, statusesToRemove, pod); err != nil { + // no error is returned because, in all likelihood, the network has already been deleted + klog.Errorf("Failed to delete EgressIP %s assignment for pod %s/%s attached to network %s: %v", + name, pod.Namespace, pod.Name, cachedNetwork.GetNetworkName(), err) + } + } + } +} + +// isPodScheduledinLocalZone returns true if +// - interconnect is disabled, or +// - pod.Spec.NodeName is recorded as local in e.nodeZoneState (unknown nodes default to local) +// +// false otherwise, in particular for unscheduled pods.
+func (e *EgressIPController) isPodScheduledinLocalZone(pod *kapi.Pod) bool { + if !config.OVNKubernetesFeature.EnableInterconnect { + return true + } + isLocalZonePod := true + + if e.nodeZoneState != nil { + if util.PodScheduled(pod) { + if isLocal, ok := e.nodeZoneState.Load(pod.Spec.NodeName); ok { + isLocalZonePod = isLocal + } + } else { + isLocalZonePod = false + } + } + return isLocalZonePod +} + +// isLocalZoneNode returns true if the node is part of the local zone. +func (e *EgressIPController) isLocalZoneNode(node *kapi.Node) bool { + /** HACK BEGIN **/ + // TODO(tssurya): Remove this HACK a few months from now. This has been added only to + // minimize disruption for upgrades when moving to interconnect=true. + // We want the legacy ovnkube-master to wait for remote ovnkube-node to + // signal it using "k8s.ovn.org/remote-zone-migrated" annotation before + // considering a node as remote when we upgrade from "global" (1 zone IC) + // zone to multi-zone. This is so that network disruption for the existing workloads + // is negligible and until the point where ovnkube-node flips the switch to connect + // to the new SBDB, it would continue talking to the legacy RAFT ovnkube-sbdb to ensure + // OVN/OVS flows are intact. + if e.zone == types.OvnDefaultZone { + return !util.HasNodeMigratedZone(node) + } + /** HACK END **/ + return util.GetNodeZone(node) == e.zone +} + +type egressIPCache struct { + // egressIP name -> network name -> cache + egressIPNameToPods map[string]map[string]selectedPods + // egressLocalNodes will contain all nodes that are local + // to this zone which are serving this egressIP object. + // This will help sync SNATs + egressLocalNodesCache sets.Set[string] + // egressIP IP -> assigned node name + egressIPIPToNodeCache map[string]string + // node name -> network name -> redirect IPs + egressNodeRedirectsCache nodeNetworkRedirects + // network name -> OVN cluster router name + networkToRouter map[string]string + // packet mark for primary secondary networks + // EgressIP name -> mark + markCache map[string]string +} + +type nodeNetworkRedirects struct { + // node name -> network name -> redirect IPs + cache map[string]map[string]redirectIPs +} + +type selectedPods struct { // egressLocalPods will contain all the pods that // are local to this zone being served by this egressIP + // object. This will help sync LRP & LRSR. @@ -830,15 +1089,27 @@ type egressIPCacheEntry struct { // that are being served by this egressIP object // This will help sync SNATs. egressRemotePods map[string]sets.Set[string] // will be used only when multizone IC is enabled - gatewayRouterIPs sets.Set[string] - egressIPs map[string]string - // egressLocalNodes will contain all nodes that are local - // to this zone which are serving this egressIP object.. - // This will help sync SNATs - egressLocalNodes sets.Set[string] } -func (oc *DefaultNetworkController) syncEgressIPs(namespaces []interface{}) error { +// redirectIPs stores IPv4 or IPv6 next hops to support creation of OVN logical router policies.
+type redirectIPs struct { + v4Gateway string + v6Gateway string + v4TransitSwitch string + v6TransitSwitch string + v4MgtPort string + v6MgtPort string +} + +func (r redirectIPs) containsIP(ip string) bool { + switch ip { + case r.v4MgtPort, r.v6MgtPort, r.v4TransitSwitch, r.v6TransitSwitch, r.v4Gateway, r.v6Gateway: + return true + } + return false +} + +func (e *EgressIPController) syncEgressIPs(namespaces []interface{}) error { // This part will take of syncing stale data which we might have in OVN if // there's no ovnkube-master running for a while, while there are changes to // pods/egress IPs. @@ -855,30 +1126,35 @@ func (oc *DefaultNetworkController) syncEgressIPs(namespaces []interface{}) erro // WatchNodes() is called before WatchEgressIPNamespaces() so the oc.localZones cache // will be updated whereas WatchEgressNodes() is called after WatchEgressIPNamespaces() // and so we must update the cache to ensure we are not stale. - if err := oc.syncLocalNodeZonesCache(); err != nil { - return fmt.Errorf("syncLocalNodeZonesCache unable to update the local zones node cache: %v", err) - } - egressIPCache, err := oc.generateCacheForEgressIP() + // FIXME(martinkennelly): re-enable when EIP controller is fully extracted from DNC + //if err := e.SyncLocalNodeZonesCache(); err != nil { + // return fmt.Errorf("SyncLocalNodeZonesCache unable to update the local zones node cache: %v", err) + //} + egressIPCache, err := e.generateCacheForEgressIP() if err != nil { return fmt.Errorf("syncEgressIPs unable to generate cache for egressip: %v", err) } - if err = oc.syncStaleEgressReroutePolicy(egressIPCache); err != nil { + if err = e.syncStaleEgressReroutePolicy(egressIPCache); err != nil { return fmt.Errorf("syncEgressIPs unable to remove stale reroute policies: %v", err) } - if err = oc.syncStaleSNATRules(egressIPCache); err != nil { + if err = e.syncStaleSNATRules(egressIPCache); err != nil { return fmt.Errorf("syncEgressIPs unable to remove stale nats: %v", err) } - if err = oc.syncPodAssignmentCache(egressIPCache); err != nil { + if err = e.syncPodAssignmentCache(egressIPCache); err != nil { return fmt.Errorf("syncEgressIPs unable to sync internal pod assignment cache: %v", err) } - if err = oc.syncStaleAddressSetIPs(egressIPCache); err != nil { + if err = e.syncStaleAddressSetIPs(egressIPCache); err != nil { return fmt.Errorf("syncEgressIPs unable to reset stale address IPs: %v", err) } + if err = e.syncStaleGWMarkRules(egressIPCache); err != nil { + return fmt.Errorf("syncEgressIPs unable to sync GW packet mark rules: %v", err) + } return nil } -func (oc *DefaultNetworkController) syncLocalNodeZonesCache() error { - nodes, err := oc.watchFactory.GetNodes() +// SyncLocalNodeZonesCache iterates over all known Nodes and stores whether it is a local or remote OVN zone. +func (e *EgressIPController) SyncLocalNodeZonesCache() error { + nodes, err := e.watchFactory.GetNodes() if err != nil { return fmt.Errorf("unable to fetch nodes from watch factory %w", err) } @@ -887,190 +1163,442 @@ func (oc *DefaultNetworkController) syncLocalNodeZonesCache() error { // while the node's annotations are not yet set, so it still shows global. 
// The EgressNodeType events (which are basically all node updates) should // constantly update this cache as nodes get added, updated and removed - oc.eIPC.nodeZoneState.LockKey(node.Name) - oc.eIPC.nodeZoneState.Store(node.Name, oc.isLocalZoneNode(node)) - oc.eIPC.nodeZoneState.UnlockKey(node.Name) + e.nodeZoneState.LockKey(node.Name) + e.nodeZoneState.Store(node.Name, e.isLocalZoneNode(node)) + e.nodeZoneState.UnlockKey(node.Name) } return nil } -func (oc *DefaultNetworkController) syncStaleAddressSetIPs(egressIPCache map[string]egressIPCacheEntry) error { - dbIDs := getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, oc.controllerName) - as, err := oc.addressSetFactory.EnsureAddressSet(dbIDs) - if err != nil { - return fmt.Errorf("cannot ensure that addressSet for egressIP pods %s exists %v", EgressIPServedPodsAddrSetName, err) - } - var allEIPServedPodIPs []net.IP - // we only care about local zone pods for the address-set since - // traffic from remote pods towards nodeIP won't even reach this zone - for eipName := range egressIPCache { - for _, podIPs := range egressIPCache[eipName].egressLocalPods { - for podIP := range podIPs { - allEIPServedPodIPs = append(allEIPServedPodIPs, net.ParseIP(podIP)) +func (e *EgressIPController) syncStaleAddressSetIPs(egressIPCache egressIPCache) error { + for _, networkPodCache := range egressIPCache.egressIPNameToPods { + for networkName, podCache := range networkPodCache { + dbIDs := getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, networkName, e.controllerName) + as, err := e.addressSetFactory.EnsureAddressSet(dbIDs) + if err != nil { + return fmt.Errorf("network %s: cannot ensure that addressSet for egressIP pods %s exists %v", networkName, EgressIPServedPodsAddrSetName, err) + } + var allEIPServedPodIPs []net.IP + // we only care about local zone pods for the address-set since + // traffic from remote pods towards nodeIP won't even reach this zone + for _, podIPs := range podCache.egressLocalPods { + for podIP := range podIPs { + allEIPServedPodIPs = append(allEIPServedPodIPs, net.ParseIP(podIP)) + } + } + + // we replace all IPs in the address-set based on eIP cache constructed from kapi + // note that setIPs is not thread-safe + if err = as.SetAddresses(util.StringSlice(allEIPServedPodIPs)); err != nil { + return fmt.Errorf("network %s: cannot reset egressPodIPs in address set %v: err: %v", networkName, EgressIPServedPodsAddrSetName, err) } } } - // we replace all IPs in the address-set based on eIP cache constructed from kapi - // note that setIPs is not thread-safe - if err = as.SetAddresses(util.StringSlice(allEIPServedPodIPs)); err != nil { - return fmt.Errorf("cannot reset egressPodIPs in address set %v: err: %v", EgressIPServedPodsAddrSetName, err) - } return nil } -// syncPodAssignmentCache rebuilds the internal pod cache used by the egressIP feature. -// We use the existing kapi and ovn-db information to populate oc.eIPC.podAssignment cache for -// all the pods that are managed by egressIPs. -// NOTE: This is done mostly to handle the corner case where one pod has more than one -// egressIP object matching it, in which case we do the ovn setup only for one of the objects. 
-// This corner case of same pod matching more than one object will not work for IC deployments -// since internal cache based logic will be different for different ovnkube-controllers -// zone can think objA is active while zoneb can think objB is active if both have multiple choice options -func (oc *DefaultNetworkController) syncPodAssignmentCache(egressIPCache map[string]egressIPCacheEntry) error { - oc.eIPC.podAssignmentMutex.Lock() - defer oc.eIPC.podAssignmentMutex.Unlock() - for egressIPName, state := range egressIPCache { - p1 := func(item *nbdb.LogicalRouterPolicy) bool { - return item.Priority == types.EgressIPReroutePriority && item.ExternalIDs["name"] == egressIPName - } - reRoutePolicies, err := libovsdbops.FindLogicalRouterPoliciesWithPredicate(oc.nbClient, p1) - if err != nil { - return err - } - p2 := func(item *nbdb.NAT) bool { - return item.ExternalIDs["name"] == egressIPName - } - egressIPSNATs, err := libovsdbops.FindNATsWithPredicate(oc.nbClient, p2) - if err != nil { - return err - } - // Because of how we do generateCacheForEgressIP, we will only have pods that are - // either local to zone (in which case reRoutePolicies will work) OR pods that are - // managed by local egressIP nodes (in which case egressIPSNATs will work) - egressPods := make(map[string]sets.Set[string]) - for podKey, podIPs := range state.egressLocalPods { - egressPods[podKey] = podIPs - } - for podKey, podIPs := range state.egressRemotePods { - egressPods[podKey] = podIPs - } - for podKey, podIPs := range egressPods { - podState, ok := oc.eIPC.podAssignment[podKey] - if !ok { - podState = &podAssignmentState{ - egressStatuses: egressStatuses{make(map[egressipv1.EgressIPStatusItem]string)}, - standbyEgressIPNames: sets.New[string](), - } +// syncStaleGWMarkRules removes stale or invalid LRPs that mark packets; they are attached to the egress nodes' gateway routers. +// It also adds any expected packet-marking LRPs that are missing. +func (e *EgressIPController) syncStaleGWMarkRules(egressIPCache egressIPCache) error { + // Delete all stale LRPs, then add missing LRPs. + // This func assumes one node per zone. It determines if an LRP is a valid local LRP.
It doesn't determine if the + // LRP is attached to the correct GW router if !util.IsNetworkSegmentationSupportEnabled() || !config.OVNKubernetesFeature.EnableInterconnect { + return nil + } + for _, networkPodCache := range egressIPCache.egressIPNameToPods { + for networkName, podCache := range networkPodCache { + // skip GW mark rules processing for CDN because they don't exist + if networkName == types.DefaultNetworkName { + continue } - podState.standbyEgressIPNames.Insert(egressIPName) - for _, policy := range reRoutePolicies { - splitMatch := strings.Split(policy.Match, " ") - if len(splitMatch) <= 0 { - continue + invalidLRPPredicate := func(item *nbdb.LogicalRouterPolicy) bool { + if item.Priority != types.EgressIPSNATMarkPriority || item.Action != nbdb.LogicalRouterPolicyActionAllow { + return false } - logicalIP := splitMatch[len(splitMatch)-1] - parsedLogicalIP := net.ParseIP(logicalIP) - if parsedLogicalIP == nil { - continue + // skip if owned by another controller + if item.ExternalIDs[libovsdbops.OwnerControllerKey.String()] != getNetworkControllerName(networkName) { + return false } - - if podIPs.Has(parsedLogicalIP.String()) { // should match for only one egressIP object - podState.egressIPName = egressIPName - podState.standbyEgressIPNames.Delete(egressIPName) - klog.Infof("EgressIP %s is managing pod %s", egressIPName, podKey) + eIPName, podNamespaceName := getEIPLRPObjK8MetaData(item.ExternalIDs) + if eIPName == "" || podNamespaceName == "" { + klog.Errorf("Sync stale SNAT Mark rules for network %s unable to process logical router policy because of invalid metadata", networkName) + return true + } + _, exists := egressIPCache.egressIPNameToPods[eIPName] + // if the EgressIP doesn't exist, it's stale + if !exists { + return true + } + // if there are no local egress nodes, the LRP must be invalid + if egressIPCache.egressLocalNodesCache.Len() == 0 { + return true + } + ipsLocal, okLocal := podCache.egressLocalPods[podNamespaceName] + ipsRemote, okRemote := podCache.egressRemotePods[podNamespaceName] + // if the pod doesn't exist locally or remotely, it's stale + if !okLocal && !okRemote { + return true } + var ips sets.Set[string] + if okLocal { + ips = ipsLocal + } + if okRemote { + ips = ipsRemote + } + podIP := getPodIPFromEIPSNATMarkMatch(item.Match) + if podIP == "" { + // invalid match + return true + } + if !ips.Has(podIP) { + return true + } + // FIXME: not multi node per zone aware.
Doesn't try to find out if the + LRP is on the correct node's GW router + pktMarkValue, ok := item.Options["pkt_mark"] + if !ok || egressIPCache.markCache[eIPName] != "" && pktMarkValue != egressIPCache.markCache[eIPName] { + return true + } + return false + } + invalidLRPs, err := libovsdbops.FindLogicalRouterPoliciesWithPredicate(e.nbClient, invalidLRPPredicate) + if err != nil { + return fmt.Errorf("network %s: unable to retrieve invalid SNAT mark logical router policies: %v", networkName, err) + } + if len(invalidLRPs) == 0 { + return nil + } + // gather UUIDs of invalid LRPs + invalidLRPUUIDs := sets.New[string]() + for _, invalidLRP := range invalidLRPs { + invalidLRPUUIDs.Insert(invalidLRP.UUID) + } + // gather local node names + localNodeNames := make([]string, 0, 1) + allNodes := e.nodeZoneState.GetKeys() + for _, node := range allNodes { + if isLocal, ok := e.nodeZoneState.Load(node); ok && isLocal { + localNodeNames = append(localNodeNames, node) + } + } + invalidLRPPredicate = func(item *nbdb.LogicalRouterPolicy) bool { + return invalidLRPUUIDs.Has(item.UUID) } - for _, snat := range egressIPSNATs { - if podIPs.Has(snat.LogicalIP) { // should match for only one egressIP object - podState.egressIPName = egressIPName - podState.standbyEgressIPNames.Delete(egressIPName) - klog.Infof("EgressIP %s is managing pod %s", egressIPName, podKey) + for _, nodeName := range localNodeNames { + ni, err := util.NewNetInfo(&ovncnitypes.NetConf{ + Topology: types.Layer3Topology, + NetConf: cnitypes.NetConf{ + Name: networkName, + }, + }) + if err != nil { + return fmt.Errorf("failed to create new network %s: %v", networkName, err) + } + routerName := ni.GetNetworkScopedGWRouterName(nodeName) + lrps, err := libovsdbops.FindALogicalRouterPoliciesWithPredicate(e.nbClient, routerName, invalidLRPPredicate) + if err != nil { + if errors.Is(err, libovsdbclient.ErrNotFound) { + continue + } + return fmt.Errorf("network %s: failed to find invalid logical router policies on gateway router %s: %v", networkName, routerName, err) + } + if err = libovsdbops.DeleteLogicalRouterPolicies(e.nbClient, routerName, lrps...); err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { + return fmt.Errorf("network %s: failed to delete invalid logical router policies on gateway router %s: %v", networkName, routerName, err) } } - oc.eIPC.podAssignment[podKey] = podState + } } - return nil -} - -// This function implements a portion of syncEgressIPs. -// It removes OVN logical router policies used by EgressIPs deleted while ovnkube-master was down. -// It also removes stale nexthops from router policies used by EgressIPs. -// Upon failure, it may be invoked multiple times in order to avoid a pod restart. -func (oc *DefaultNetworkController) syncStaleEgressReroutePolicy(egressIPCache map[string]egressIPCacheEntry) error { - logicalRouterPolicyStaleNexthops := []*nbdb.LogicalRouterPolicy{} - p := func(item *nbdb.LogicalRouterPolicy) bool { - if item.Priority != types.EgressIPReroutePriority { - return false - } - egressIPName := item.ExternalIDs["name"] - cacheEntry, exists := egressIPCache[egressIPName] - splitMatch := strings.Split(item.Match, " ") - logicalIP := splitMatch[len(splitMatch)-1] - parsedLogicalIP := net.ParseIP(logicalIP) - egressPodIPs := sets.NewString() - if exists { - // Since LRPs are created only for pods local to this zone - // we need to care about only those pods. Nexthop for them will - // either be transit switch IP or join switch IP or mp0 IP.
- // FIXME: LRPs are also created for remote pods to route them - // correctly but we do not handling cleaning for them now - for _, podIPs := range cacheEntry.egressLocalPods { - egressPodIPs.Insert(podIPs.UnsortedList()...) - } + // ensure all LRPs to mark pods are present + isSupportedIP := func(podIP net.IP) bool { + isIPv6 := utilnet.IsIPv6(podIP) + if isIPv6 && e.v6 { + return true } - if !exists || cacheEntry.gatewayRouterIPs.Len() == 0 || !egressPodIPs.Has(parsedLogicalIP.String()) { - klog.Infof("syncStaleEgressReroutePolicy will delete %s due to no nexthop or stale logical ip: %v", egressIPName, item) + if !isIPv6 && e.v4 { return true } - // Check for stale nexthops that may exist in the logical router policy and store that in logicalRouterPolicyStaleNexthops. - // Note: adding missing nexthop(s) to the logical router policy is done outside the scope of this function. - staleNextHops := []string{} - for _, nexthop := range item.Nexthops { - if !cacheEntry.gatewayRouterIPs.Has(nexthop) { - staleNextHops = append(staleNextHops, nexthop) + return false + } + + processPodFn := func(ops []ovsdb.Operation, eIPName, podKey, mark, routerName, networkName string, podIPs sets.Set[string], isEIPIPv6 bool) ([]ovsdb.Operation, error) { + podNamespace, podName := getPodNamespaceAndNameFromKey(podKey) + dbIDs := getEgressIPLRPSNATMarkDbIDs(eIPName, podNamespace, podName, getEIPIPFamily(isEIPIPv6), networkName, e.controllerName) + for _, podIPStr := range podIPs.UnsortedList() { + podIP := net.ParseIP(podIPStr) + if podIP == nil || utilnet.IsIPv6(podIP) != isEIPIPv6 || !isSupportedIP(podIP) { + continue + } - } - if len(staleNextHops) > 0 { lrp := nbdb.LogicalRouterPolicy{ - UUID: item.UUID, - Nexthops: staleNextHops, + Match: fmt.Sprintf("%s.src == %s", getEIPIPFamily(isEIPIPv6), podIPStr), + Priority: types.EgressIPSNATMarkPriority, + Action: nbdb.LogicalRouterPolicyActionAllow, + ExternalIDs: dbIDs.GetExternalIDs(), + Options: map[string]string{"pkt_mark": mark}, + } + p := libovsdbops.GetPredicate[*nbdb.LogicalRouterPolicy](dbIDs, nil) + ops, err := libovsdbops.CreateOrUpdateLogicalRouterPolicyWithPredicateOps(e.nbClient, ops, routerName, &lrp, p) + if err != nil { + return ops, fmt.Errorf("network %s: error creating logical router policy %+v create/update ops for packet marking on router %s: %v", networkName, lrp, routerName, err) } - logicalRouterPolicyStaleNexthops = append(logicalRouterPolicyStaleNexthops, &lrp) } - return false + return ops, nil } - err := libovsdbops.DeleteLogicalRouterPoliciesWithPredicate(oc.nbClient, oc.GetNetworkScopedClusterRouterName(), p) - if err != nil { - return fmt.Errorf("error deleting stale logical router policies from router %s: %v", oc.GetNetworkScopedClusterRouterName(), err) + var ops []ovsdb.Operation + for eIPName, networkPodCache := range egressIPCache.egressIPNameToPods { + if egressIPCache.markCache[eIPName] == "" { + continue + } + for networkName, podCache := range networkPodCache { + for eIP, nodeName := range egressIPCache.egressIPIPToNodeCache { + if !egressIPCache.egressLocalNodesCache.Has(nodeName) { + continue + } + ni, err := util.NewNetInfo(&ovncnitypes.NetConf{ + Topology: types.Layer3Topology, + NetConf: cnitypes.NetConf{ + Name: networkName, + }, + }) + if err != nil { + return fmt.Errorf("failed to create new network %s: %v", networkName, err) + } + routerName := ni.GetNetworkScopedGWRouterName(nodeName) + isEIPIPv6 := utilnet.IsIPv6String(eIP) + for podKey, podIPs := range podCache.egressLocalPods { + ops, err = processPodFn(ops,
eIPName, podKey, egressIPCache.markCache[eIPName], routerName, networkName, podIPs, isEIPIPv6) + if err != nil { + return fmt.Errorf("network %s: failed to process local pod %s gateway router SNAT mark: %v", networkName, podKey, err) + } + } + for podKey, podIPs := range podCache.egressRemotePods { + ops, err = processPodFn(ops, eIPName, podKey, egressIPCache.markCache[eIPName], routerName, networkName, podIPs, isEIPIPv6) + if err != nil { + return fmt.Errorf("network %s: failed to process remote pod %s gateway router SNAT mark: %v", networkName, podKey, err) + } + } + } + } } - - // Update Logical Router Policies that have stale nexthops. Notice that we must do this separately - // because logicalRouterPolicyStaleNexthops must be populated first - klog.Infof("syncStaleEgressReroutePolicy will remove stale nexthops: %+v", logicalRouterPolicyStaleNexthops) - err = libovsdbops.DeleteNextHopsFromLogicalRouterPolicies(oc.nbClient, oc.GetNetworkScopedClusterRouterName(), logicalRouterPolicyStaleNexthops...) + _, err := libovsdbops.TransactAndCheck(e.nbClient, ops) if err != nil { - return fmt.Errorf("unable to remove stale next hops from logical router policies: %v", err) + return fmt.Errorf("error transacting ops %+v: %v", ops, err) } return nil } -// This function implements a portion of syncEgressIPs. -// It removes OVN NAT rules used by EgressIPs deleted while ovnkube-master was down. -// Upon failure, it may be invoked multiple times in order to avoid a pod restart. -func (oc *DefaultNetworkController) syncStaleSNATRules(egressIPCache map[string]egressIPCacheEntry) error { - predicate := func(item *nbdb.NAT) bool { - egressIPName, exists := item.ExternalIDs["name"] - // Exclude rows that have no name or are not the right type - if !exists || item.Type != nbdb.NATTypeSNAT { - return false + return nil +} +
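For reference, the policy reconciled above has a small, fixed shape. A minimal sketch of it, not the PR's code: the pod IP and mark value are hypothetical, the import paths assume the repo's go-controller module layout, and ExternalIDs (set from getEgressIPLRPSNATMarkDbIDs in the real code) are elided.

```go
// Sketch of the pkt_mark LRP that syncStaleGWMarkRules validates and that
// processPodFn (re)creates on an egress node's gateway router.
package egressipsketch

import (
	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
)

func sketchGWMarkLRP() *nbdb.LogicalRouterPolicy {
	return &nbdb.LogicalRouterPolicy{
		Match:    "ip4.src == 10.244.1.5",                // "<family>.src == <podIP>"; hypothetical pod IP
		Priority: types.EgressIPSNATMarkPriority,         // priority reserved for SNAT-mark rules
		Action:   nbdb.LogicalRouterPolicyActionAllow,    // allow the packet while attaching a mark
		Options:  map[string]string{"pkt_mark": "50001"}, // hypothetical mark allocated to the EgressIP
	}
}
```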
+// syncPodAssignmentCache rebuilds the internal pod cache used by the egressIP feature. +// We use the existing kapi and ovn-db information to populate the e.podAssignment cache for +// all the pods that are managed by egressIPs. +// NOTE: This is done mostly to handle the corner case where one pod has more than one +// egressIP object matching it, in which case we do the ovn setup only for one of the objects. +// This corner case of same pod matching more than one object will not work for IC deployments +// since internal cache based logic will be different for different ovnkube-controllers +// zoneA can think objA is active while zoneB can think objB is active if both have multiple choice options +func (e *EgressIPController) syncPodAssignmentCache(egressIPCache egressIPCache) error { + e.podAssignmentMutex.Lock() + defer e.podAssignmentMutex.Unlock() + for egressIPName, networkPods := range egressIPCache.egressIPNameToPods { + for networkName, podCache := range networkPods { + p1 := func(item *nbdb.LogicalRouterPolicy) bool { + return item.Priority == types.EgressIPReroutePriority && + item.ExternalIDs[libovsdbops.NetworkKey.String()] == networkName && + item.ExternalIDs[libovsdbops.OwnerControllerKey.String()] == getNetworkControllerName(networkName) && + strings.HasPrefix(item.ExternalIDs[libovsdbops.ObjectNameKey.String()], egressIPName+dbIDEIPNamePodDivider) + } + ni, err := e.nadController.GetNetwork(networkName) + if err != nil { + return fmt.Errorf("failed to get active network for network name %q: %v", networkName, err) + } + reRoutePolicies, err := libovsdbops.FindALogicalRouterPoliciesWithPredicate(e.nbClient, ni.GetNetworkScopedClusterRouterName(), p1) + if err != nil { + return err + } + // not scoped by network since all NATs selected by the following predicate are CDN NATs + p2 := func(item *nbdb.NAT) bool { + return strings.HasPrefix(item.ExternalIDs[libovsdbops.ObjectNameKey.String()], egressIPName) + } + // NATs are only valid for CDN + egressIPSNATs := make([]*nbdb.NAT, 0) + if networkName == types.DefaultNetworkName { + egressIPSNATs, err = libovsdbops.FindNATsWithPredicate(e.nbClient, p2) + if err != nil { + return fmt.Errorf("failed to find NATs with predicate for network %s: %v", networkName, err) + } + } + // Because of how we do generateCacheForEgressIP, we will only have pods that are + // either local to zone (in which case reRoutePolicies will work) OR pods that are + // managed by local egressIP nodes (in which case egressIPSNATs will work) + egressPods := make(map[string]sets.Set[string]) + for podKey, podIPs := range podCache.egressLocalPods { + egressPods[podKey] = podIPs + } + for podKey, podIPs := range podCache.egressRemotePods { + egressPods[podKey] = podIPs + } + for podKey, podIPsSet := range egressPods { + podIPs := make([]net.IP, 0, podIPsSet.Len()) + for _, podIP := range podIPsSet.UnsortedList() { + podIPs = append(podIPs, net.ParseIP(podIP)) + } + podState, ok := e.podAssignment[podKey] + if !ok { + podState = &podAssignmentState{ + egressStatuses: egressStatuses{make(map[egressipv1.EgressIPStatusItem]string)}, + standbyEgressIPNames: sets.New[string](), + podIPs: podIPs, + network: ni, + } + } + + podState.standbyEgressIPNames.Insert(egressIPName) + for _, policy := range reRoutePolicies { + splitMatch := strings.Split(policy.Match, " ") + if len(splitMatch) <= 0 { + continue + } + logicalIP := splitMatch[len(splitMatch)-1] + parsedLogicalIP := net.ParseIP(logicalIP) + if parsedLogicalIP == nil { + continue + } + + if podIPsSet.Has(parsedLogicalIP.String()) { // should match for only one egressIP object + podState.egressIPName = egressIPName + podState.standbyEgressIPNames.Delete(egressIPName) + klog.Infof("EgressIP %s is managing pod %s for network %s", egressIPName, podKey, networkName) + } + } + // process SNAT only for CDN + if networkName == types.DefaultNetworkName { + for _, snat := range egressIPSNATs { + if podIPsSet.Has(snat.LogicalIP)
{ // should match for only one egressIP object + podState.egressIPName = egressIPName + podState.standbyEgressIPNames.Delete(egressIPName) + klog.Infof("EgressIP %s is managing pod %s for network %s", egressIPName, podKey, networkName) + } + } + } + + e.podAssignment[podKey] = podState + } } + } + + return nil +} + +// This function implements a portion of syncEgressIPs. +// It removes OVN logical router policies used by EgressIPs deleted while ovnkube-master was down. +// It also removes stale nexthops from router policies used by EgressIPs. +// Upon failure, it may be invoked multiple times in order to avoid a pod restart. +func (e *EgressIPController) syncStaleEgressReroutePolicy(cache egressIPCache) error { + for _, networkCache := range cache.egressIPNameToPods { + for networkName, data := range networkCache { + logicalRouterPolicyStaleNexthops := []*nbdb.LogicalRouterPolicy{} + p := func(item *nbdb.LogicalRouterPolicy) bool { + if item.Priority != types.EgressIPReroutePriority || item.ExternalIDs[libovsdbops.NetworkKey.String()] != networkName { + return false + } + egressIPName, _ := getEIPLRPObjK8MetaData(item.ExternalIDs) + if egressIPName == "" { + klog.Errorf("syncStaleEgressReroutePolicy found logical router policy (UUID: %s) with invalid metadata associated with network %s", item.UUID, networkName) + return false + } + splitMatch := strings.Split(item.Match, " ") + logicalIP := splitMatch[len(splitMatch)-1] + parsedLogicalIP := net.ParseIP(logicalIP) + egressPodIPs := sets.NewString() + // Since LRPs are created only for pods local to this zone + // we need to care about only those pods. Nexthop for them will + // either be transit switch IP or join switch IP or mp0 IP. + // FIXME: LRPs are also created for remote pods to route them + // correctly but we do not handle cleaning them up for now + for _, podIPs := range data.egressLocalPods { + egressPodIPs.Insert(podIPs.UnsortedList()...) + } + if !egressPodIPs.Has(parsedLogicalIP.String()) { + klog.Infof("syncStaleEgressReroutePolicy will delete %s due to no nexthop or stale logical ip: %v", egressIPName, item) + return true + } + // Check for stale nexthops that may exist in the logical router policy and store that in logicalRouterPolicyStaleNexthops. + // Note: adding missing nexthop(s) to the logical router policy is done outside the scope of this function. + staleNextHops := []string{} + for _, nexthop := range item.Nexthops { + nodeName, ok := cache.egressIPIPToNodeCache[parsedLogicalIP.String()] + if !ok { + klog.Infof("syncStaleEgressReroutePolicy will delete %s due to no node assigned to logical ip: %v", egressIPName, item) + return true + } + nodeRedirects, ok := cache.egressNodeRedirectsCache.cache[networkName] + if !ok { + klog.Infof("syncStaleEgressReroutePolicy will delete %s due to no network in cache: %v", egressIPName, item) + return true + } + redirects, ok := nodeRedirects[nodeName] + if !ok { + klog.Infof("syncStaleEgressReroutePolicy will delete %s due to no redirects for node in cache: %v", egressIPName, item) + return true + }
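The membership test used just below, redirectIPs.containsIP, is defined elsewhere in the PR. A reading-aid reconstruction, assuming it does nothing more than match a next hop against the candidate IPs that generateCacheForEgressIP records per network and node:

```go
// Assumed shape of redirectIPs and containsIP; not the PR's code.
package egressipsketch

type redirectIPs struct {
	v4Gateway, v6Gateway             string // gateway router join IPs (local-zone nodes)
	v4MgtPort, v6MgtPort             string // management port IPs
	v4TransitSwitch, v6TransitSwitch string // transit switch IPs (remote nodes, interconnect)
}

// containsIP reports whether ip is one of the recorded candidate next hops.
func (r redirectIPs) containsIP(ip string) bool {
	if ip == "" {
		return false // unset fields are empty strings and must never match
	}
	switch ip {
	case r.v4Gateway, r.v6Gateway, r.v4MgtPort, r.v6MgtPort, r.v4TransitSwitch, r.v6TransitSwitch:
		return true
	}
	return false
}
```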
+ // FIXME: be more specific about which is the valid next hop instead of relying on verifying if the IP is within a valid set of IPs. + if !redirects.containsIP(nexthop) { + staleNextHops = append(staleNextHops, nexthop) + } + } + if len(staleNextHops) > 0 { + lrp := nbdb.LogicalRouterPolicy{ + UUID: item.UUID, + Nexthops: staleNextHops, + } + logicalRouterPolicyStaleNexthops = append(logicalRouterPolicyStaleNexthops, &lrp) + } + return false + } + + err := libovsdbops.DeleteLogicalRouterPoliciesWithPredicate(e.nbClient, cache.networkToRouter[networkName], p) + if err != nil { + return fmt.Errorf("error deleting stale logical router policies from router %s for network %s: %v", cache.networkToRouter[networkName], networkName, err) + } + + // Update Logical Router Policies that have stale nexthops. Notice that we must do this separately + // because logicalRouterPolicyStaleNexthops must be populated first + klog.Infof("syncStaleEgressReroutePolicy will remove stale nexthops for network %s: %+v", networkName, logicalRouterPolicyStaleNexthops) + err = libovsdbops.DeleteNextHopsFromLogicalRouterPolicies(e.nbClient, cache.networkToRouter[networkName], logicalRouterPolicyStaleNexthops...) + if err != nil { + return fmt.Errorf("unable to remove stale next hops from logical router policies for network %s: %v", networkName, err) + } + } + } + + return nil +} + +// This function implements a portion of syncEgressIPs. +// It removes OVN NAT rules used by EgressIPs deleted while ovnkube-master was down for the default cluster network only. +// Upon failure, it may be invoked multiple times in order to avoid a pod restart. For UDNs, we do not SNAT to the EgressIP +// using OVN's gateway router and in fact we do not use OVN, but instead we add OVS flows in the external bridge to SNAT to the EgressIP. This is not managed here. +func (e *EgressIPController) syncStaleSNATRules(egressIPCache egressIPCache) error { + predicate := func(item *nbdb.NAT) bool { + if item.Type != nbdb.NATTypeSNAT { + return false + } + egressIPMetaData, exists := item.ExternalIDs[libovsdbops.ObjectNameKey.String()] + if !exists { + return false + } + egressIPMeta := strings.Split(egressIPMetaData, dbIDEIPNamePodDivider) + if len(egressIPMeta) != 2 { + klog.Errorf("Found NAT %s with erroneous object name key %s", item.UUID, egressIPMetaData) + return false + } + egressIPName := egressIPMeta[0] parsedLogicalIP := net.ParseIP(item.LogicalIP).String() - cacheEntry, exists := egressIPCache[egressIPName] + cacheEntry, exists := egressIPCache.egressIPNameToPods[egressIPName][types.DefaultNetworkName] egressPodIPs := sets.NewString() if exists { // since SNATs can be present either if status.Node was local to @@ -1086,15 +1614,20 @@ func (oc *DefaultNetworkController) syncStaleSNATRules(egressIPCache map[string] klog.Infof("syncStaleSNATRules will delete %s due to logical ip: %v", egressIPName, item) return true } - if node, ok := cacheEntry.egressIPs[item.ExternalIP]; !ok || !cacheEntry.egressLocalNodes.Has(node) || - item.LogicalPort == nil || *item.LogicalPort != oc.GetNetworkScopedK8sMgmtIntfName(node) { + ni, err := e.nadController.GetNetwork(types.DefaultNetworkName) + if err != nil { + klog.Errorf("syncStaleSNATRules failed to find default network in networks cache") + return false + } + if node, ok := egressIPCache.egressIPIPToNodeCache[item.ExternalIP]; !ok || !egressIPCache.egressLocalNodesCache.Has(node) || + item.LogicalPort == nil || *item.LogicalPort != ni.GetNetworkScopedK8sMgmtIntfName(node) { klog.Infof("syncStaleSNATRules will delete %s due to external ip or stale logical port: %v", egressIPName, item) return true } return false } -
nats, err := libovsdbops.FindNATsWithPredicate(oc.nbClient, predicate) + nats, err := libovsdbops.FindNATsWithPredicate(e.nbClient, predicate) if err != nil { return fmt.Errorf("unable to sync egress IPs err: %v", err) } @@ -1111,7 +1644,7 @@ func (oc *DefaultNetworkController) syncStaleSNATRules(egressIPCache map[string] p := func(item *nbdb.LogicalRouter) bool { return natIds.HasAny(item.Nat...) } - routers, err := libovsdbops.FindLogicalRoutersWithPredicate(oc.nbClient, p) + routers, err := libovsdbops.FindLogicalRoutersWithPredicate(e.nbClient, p) if err != nil { return fmt.Errorf("unable to sync egress IPs, err: %v", err) } @@ -1119,7 +1652,7 @@ func (oc *DefaultNetworkController) syncStaleSNATRules(egressIPCache map[string] var errors []error ops := []ovsdb.Operation{} for _, router := range routers { - ops, err = libovsdbops.DeleteNATsOps(oc.nbClient, ops, router, nats...) + ops, err = libovsdbops.DeleteNATsOps(e.nbClient, ops, router, nats...) if err != nil { errors = append(errors, fmt.Errorf("error deleting stale NAT from router %s: %v", router.Name, err)) continue @@ -1135,13 +1668,13 @@ func (oc *DefaultNetworkController) syncStaleSNATRules(egressIPCache map[string] predicate := func(item *nbdb.NAT) bool { return natIds.Has(item.UUID) } - ops, err = libovsdbops.DeleteNATsWithPredicateOps(oc.nbClient, ops, predicate) + ops, err = libovsdbops.DeleteNATsWithPredicateOps(e.nbClient, ops, predicate) if err != nil { return fmt.Errorf("unable to delete stale SNATs err: %v", err) } } - _, err = libovsdbops.TransactAndCheck(oc.nbClient, ops) + _, err = libovsdbops.TransactAndCheck(e.nbClient, ops) if err != nil { return fmt.Errorf("error deleting stale NATs: %v", err) } @@ -1153,90 +1686,209 @@ func (oc *DefaultNetworkController) syncStaleSNATRules(egressIPCache map[string] // atomic items with the same general information repeated across most (egressIP // name, logical IP defined for that name), hence use a cache to avoid round // trips to the API server per item. 
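Before the reworked generateCacheForEgressIP below, a reading aid: the egressIPCache it returns is now keyed per network. This sketch reconstructs its layout from how the fields are used in this hunk (redirectIPs as in the sketch above); the actual definitions live elsewhere in the PR.

```go
// Assumed layout of the cache populated by generateCacheForEgressIP; not the PR's code.
package egressipsketch

import "k8s.io/apimachinery/pkg/util/sets"

type selectedPods struct {
	egressLocalPods  map[string]sets.Set[string] // pod key -> pod IPs, for pods local to this zone
	egressRemotePods map[string]sets.Set[string] // pod key -> pod IPs, for remote pods served by local egress nodes
}

type nodeNetworkRedirects struct {
	cache map[string]map[string]redirectIPs // network name -> node name -> candidate next hops
}

type egressIPCache struct {
	egressIPNameToPods       map[string]map[string]selectedPods // EgressIP name -> network name -> selected pods
	egressLocalNodesCache    sets.Set[string]                   // egress nodes local to this zone; helps sync SNATs
	egressIPIPToNodeCache    map[string]string                  // egress IP address -> node it is assigned to
	markCache                map[string]string                  // EgressIP name -> pkt_mark value
	networkToRouter          map[string]string                  // network name -> cluster router name
	egressNodeRedirectsCache nodeNetworkRedirects               // valid redirect IPs per network and node
}
```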
-func (oc *DefaultNetworkController) generateCacheForEgressIP() (map[string]egressIPCacheEntry, error) { - egressIPCache := make(map[string]egressIPCacheEntry) - egressIPs, err := oc.watchFactory.GetEgressIPs() +func (e *EgressIPController) generateCacheForEgressIP() (egressIPCache, error) { + cache := egressIPCache{} + namespaces, err := e.watchFactory.GetNamespaces() if err != nil { - return nil, err + return cache, fmt.Errorf("failed to get all namespaces: %v", err) } - for _, egressIP := range egressIPs { - egressIPCache[egressIP.Name] = egressIPCacheEntry{ - egressLocalPods: make(map[string]sets.Set[string]), - egressRemotePods: make(map[string]sets.Set[string]), - gatewayRouterIPs: sets.New[string](), // can be transit switchIPs for interconnect multizone setup - egressIPs: map[string]string{}, - egressLocalNodes: sets.New[string](), + nodes, err := e.watchFactory.GetNodes() + if err != nil { + return cache, fmt.Errorf("failed to get all nodes: %v", err) + } + localZoneNodes := sets.New[string]() + nodeNames := e.nodeZoneState.GetKeys() + for _, nodeName := range nodeNames { + if isLocal, ok := e.nodeZoneState.Load(nodeName); ok && isLocal { + localZoneNodes.Insert(nodeName) } - for _, status := range egressIP.Status.Items { - var nextHopIP string - isEgressIPv6 := utilnet.IsIPv6String(status.EgressIP) - _, isLocalZoneEgressNode := oc.localZoneNodes.Load(status.Node) - if isLocalZoneEgressNode { - gatewayRouterIP, err := oc.eIPC.getGatewayRouterJoinIP(status.Node, isEgressIPv6) - if err != nil { - klog.Errorf("Unable to retrieve gateway IP for node: %s, protocol is IPv6: %v, err: %v", status.Node, isEgressIPv6, err) + } + // network name -> node name -> redirect IPs + redirectCache := map[string]map[string]redirectIPs{} + cache.egressNodeRedirectsCache = nodeNetworkRedirects{redirectCache} + cache.networkToRouter = map[string]string{} + // build a map of networks -> nodes -> redirect IP + for _, namespace := range namespaces { + ni, err := e.nadController.GetActiveNetworkForNamespace(namespace.Name) + if err != nil { + klog.Errorf("Failed to get active network for namespace %s, stale objects may remain: %v", namespace.Name, err) + continue + } + // skip if already processed + if _, ok := redirectCache[ni.GetNetworkName()]; ok { + continue + } + redirectCache[ni.GetNetworkName()] = map[string]redirectIPs{} + cache.networkToRouter[ni.GetNetworkName()] = ni.GetNetworkScopedClusterRouterName() + for _, node := range nodes { + r := redirectIPs{} + mgmtPort := &nbdb.LogicalSwitchPort{Name: ni.GetNetworkScopedK8sMgmtIntfName(node.Name)} + mgmtPort, err := libovsdbops.GetLogicalSwitchPort(e.nbClient, mgmtPort) + if err != nil { + // if the switch port isn't created, we can assume there's nothing to sync + if errors.Is(err, libovsdbclient.ErrNotFound) { continue } - nextHopIP = gatewayRouterIP.String() - egressIPCache[egressIP.Name].egressLocalNodes.Insert(status.Node) + return cache, fmt.Errorf("failed to find management port for node %s: %v", node.Name, err) + } + mgmtPortAddresses := mgmtPort.GetAddresses() + if len(mgmtPortAddresses) == 0 { + return cache, fmt.Errorf("management switch port %s for node %s does not contain any addresses", ni.GetNetworkScopedK8sMgmtIntfName(node.Name), node.Name) + } + // assuming only one IP per IP family + for _, mgmtPortAddress := range mgmtPortAddresses { + mgmtPortAddressesStr := strings.Fields(mgmtPortAddress) + mgmtPortIP := net.ParseIP(mgmtPortAddressesStr[1]) + if utilnet.IsIPv6(mgmtPortIP) { + if ip := mgmtPortIP.To16(); ip != nil { + r.v6MgtPort =
ip.String() + } + } else { + if ip := mgmtPortIP.To4(); ip != nil { + r.v4MgtPort = ip.String() + } + } + } + + if localZoneNodes.Has(node.Name) { + if e.v4 { + if gatewayRouterIP, err := e.getGatewayRouterJoinIP(ni, node.Name, false); err != nil { + klog.V(5).Infof("Unable to retrieve gateway IP for node: %s, protocol is IPv4, err: %v", node.Name, err) + } else { + r.v4Gateway = gatewayRouterIP.String() + } + } + if e.v6 { + if gatewayRouterIP, err := e.getGatewayRouterJoinIP(ni, node.Name, true); err != nil { + klog.V(5).Infof("Unable to retrieve gateway IP for node: %s, protocol is IPv6, err: %v", node.Name, err) + } else { + r.v6Gateway = gatewayRouterIP.String() + } + } } else { - nextHopIP, err = oc.eIPC.getTransitIP(status.Node, isEgressIPv6) - if err != nil { - klog.Errorf("Unable to fetch transit switch IP for node %s: %v", status.Node, err) - continue + if e.v4 { + nextHopIP, err := e.getTransitIP(node.Name, false) + if err != nil { + klog.V(5).Infof("Unable to fetch transit switch IPv4 for node %s: %v", node.Name, err) + } else { + r.v4TransitSwitch = nextHopIP + } + } + if e.v6 { + nextHopIP, err := e.getTransitIP(node.Name, true) + if err != nil { + klog.V(5).Infof("Unable to fetch transit switch IPv6 for node %s: %v", node.Name, err) + } else { + r.v6TransitSwitch = nextHopIP + } } } - egressIPCache[egressIP.Name].gatewayRouterIPs.Insert(nextHopIP) - egressIPCache[egressIP.Name].egressIPs[status.EgressIP] = status.Node + redirectCache[ni.GetNetworkName()][node.Name] = r } - namespaces, err := oc.watchFactory.GetNamespacesBySelector(egressIP.Spec.NamespaceSelector) + } + + // egressIP name -> network name -> cache + egressIPsCache := make(map[string]map[string]selectedPods) + cache.egressIPNameToPods = egressIPsCache + // egressLocalNodesCache will contain all nodes that are local + // to this zone which are serving this egressIP object.
+ // This will help sync SNATs + egressLocalNodesCache := sets.New[string]() + cache.egressLocalNodesCache = egressLocalNodesCache + // egress IP address -> node name + egressNodesCache := make(map[string]string, 0) + cache.egressIPIPToNodeCache = egressNodesCache + cache.markCache = make(map[string]string) + egressIPs, err := e.watchFactory.GetEgressIPs() + if err != nil { + return cache, err + } + for _, egressIP := range egressIPs { + mark, err := util.ParseEgressIPMark(egressIP.Annotations) + if err != nil { + klog.Errorf("Failed to parse EgressIP %s mark: %v", egressIP.Name, err) + } + cache.markCache[egressIP.Name] = mark.String() + egressIPsCache[egressIP.Name] = make(map[string]selectedPods, 0) + for _, status := range egressIP.Status.Items { + if localZoneNodes.Has(status.Node) { + egressLocalNodesCache.Insert(status.Node) + } + egressNodesCache[status.EgressIP] = status.Node + } + namespaces, err = e.watchFactory.GetNamespacesBySelector(egressIP.Spec.NamespaceSelector) if err != nil { klog.Errorf("Error building egress IP sync cache, cannot retrieve namespaces for EgressIP: %s, err: %v", egressIP.Name, err) continue } for _, namespace := range namespaces { - pods, err := oc.watchFactory.GetPodsBySelector(namespace.Name, egressIP.Spec.PodSelector) + pods, err := e.watchFactory.GetPodsBySelector(namespace.Name, egressIP.Spec.PodSelector) if err != nil { klog.Errorf("Error building egress IP sync cache, cannot retrieve pods for namespace: %s and egress IP: %s, err: %v", namespace.Name, egressIP.Name, err) continue } + ni, err := e.nadController.GetActiveNetworkForNamespace(namespace.Name) + if err != nil { + klog.Errorf("Failed to get active network for namespace %s, skipping sync: %v", namespace.Name, err) + continue + } + _, ok := egressIPsCache[egressIP.Name][ni.GetNetworkName()] + if ok { + continue // already populated + } + egressIPsCache[egressIP.Name][ni.GetNetworkName()] = selectedPods{ + egressLocalPods: map[string]sets.Set[string]{}, + egressRemotePods: map[string]sets.Set[string]{}, + } + nadName := types.DefaultNetworkName + if ni.IsSecondary() { + nadNames := ni.GetNADs() + if len(nadNames) == 0 { + klog.Errorf("Network %s: error building egress IP sync cache, expected at least one NAD name for namespace %s", ni.GetNetworkName(), namespace.Name) + continue + } + nadName = nadNames[0] // there should only be one active network + } for _, pod := range pods { - if util.PodCompleted(pod) { + if util.PodCompleted(pod) || !util.PodScheduled(pod) || util.PodWantsHostNetwork(pod) { continue } - if len(egressIPCache[egressIP.Name].egressLocalNodes) == 0 && !oc.isPodScheduledinLocalZone(pod) { + if egressLocalNodesCache.Len() == 0 && !e.isPodScheduledinLocalZone(pod) { continue // don't process anything on master's that have nothing to do with the pod } - // FIXME(trozet): potential race where pod is not yet added in the cache by the pod handler - logicalPort, err := oc.logicalPortCache.get(pod, types.DefaultNetworkName) + podIPs, err := e.getPodIPs(ni, pod, nadName) if err != nil { - klog.Errorf("Error getting logical port %s, err: %v", util.GetLogicalPortName(pod.Namespace, pod.Name), err) + klog.Errorf("Network %s: error building egress IP sync cache, error while trying to get pod %s/%s IPs: %v", ni.GetNetworkName(), pod.Namespace, pod.Name, err) + continue + } + if len(podIPs) == 0 { continue } podKey := getPodKey(pod) - if oc.isPodScheduledinLocalZone(pod) { - _, ok := egressIPCache[egressIP.Name].egressLocalPods[podKey] + if e.isPodScheduledinLocalZone(pod) { + _, ok :=
egressIPsCache[egressIP.Name][ni.GetNetworkName()].egressLocalPods[podKey] if !ok { - egressIPCache[egressIP.Name].egressLocalPods[podKey] = sets.New[string]() + egressIPsCache[egressIP.Name][ni.GetNetworkName()].egressLocalPods[podKey] = sets.New[string]() } - for _, ipNet := range logicalPort.ips { - egressIPCache[egressIP.Name].egressLocalPods[podKey].Insert(ipNet.IP.String()) + for _, ipNet := range podIPs { + egressIPsCache[egressIP.Name][ni.GetNetworkName()].egressLocalPods[podKey].Insert(ipNet.IP.String()) } - } else if len(egressIPCache[egressIP.Name].egressLocalNodes) > 0 { + } else if egressLocalNodesCache.Len() > 0 { // it means this controller has at least one egressNode that is in localZone but matched pod is remote - _, ok := egressIPCache[egressIP.Name].egressRemotePods[podKey] + _, ok := egressIPsCache[egressIP.Name][ni.GetNetworkName()].egressRemotePods[podKey] if !ok { - egressIPCache[egressIP.Name].egressRemotePods[podKey] = sets.New[string]() + egressIPsCache[egressIP.Name][ni.GetNetworkName()].egressRemotePods[podKey] = sets.New[string]() } - for _, ipNet := range logicalPort.ips { - egressIPCache[egressIP.Name].egressRemotePods[podKey].Insert(ipNet.IP.String()) + for _, ipNet := range podIPs { + egressIPsCache[egressIP.Name][ni.GetNetworkName()].egressRemotePods[podKey].Insert(ipNet.IP.String()) } } } } } - return egressIPCache, nil + return cache, nil } type EgressIPPatchStatus struct { @@ -1253,7 +1905,7 @@ type EgressIPPatchStatus struct { // object update which risks resetting the EgressIP object's fields to the state // they had when we started processing the change. // used for UNIT TESTING only -func (oc *DefaultNetworkController) patchReplaceEgressIPStatus(name string, statusItems []egressipv1.EgressIPStatusItem) error { +func (e *EgressIPController) patchReplaceEgressIPStatus(name string, statusItems []egressipv1.EgressIPStatusItem) error { klog.Infof("Patching status on EgressIP %s: %v", name, statusItems) return retry.RetryOnConflict(retry.DefaultRetry, func() error { t := []EgressIPPatchStatus{ @@ -1269,25 +1921,46 @@ func (oc *DefaultNetworkController) patchReplaceEgressIPStatus(name string, stat if err != nil { return fmt.Errorf("error serializing status patch operation: %+v, err: %v", statusItems, err) } - return oc.kube.PatchEgressIP(name, op) + return e.kube.PatchEgressIP(name, op) }) } -func (oc *DefaultNetworkController) addEgressNode(node *v1.Node) error { +func (e *EgressIPController) addEgressNode(node *corev1.Node) error { if node == nil { return nil } - if oc.isLocalZoneNode(node) { + if e.isLocalZoneNode(node) { klog.V(5).Infof("Egress node: %s about to be initialized", node.Name) - if config.OVNKubernetesFeature.EnableInterconnect && oc.zone != types.OvnDefaultZone { + if config.OVNKubernetesFeature.EnableInterconnect && e.zone != types.OvnDefaultZone { // NOTE: EgressIP is not supported on multi-nodes-in-same-zone case // NOTE2: We don't want this route for all-nodes-in-same-zone (almost nonIC a.k.a single zone) case because // it makes no sense - all nodes are connected via the same ovn_cluster_router // NOTE3: When the node gets deleted we do not remove this route intentionally because // on IC if the node is gone, then the ovn_cluster_router is also gone along with all // the routes on it. 
- if err := libovsdbutil.CreateDefaultRouteToExternal(oc.nbClient, oc.GetNetworkScopedClusterRouterName(), oc.GetNetworkScopedGWRouterName(node.Name)); err != nil { - return err + processNetworkFn := func(ni util.NetInfo) error { + clusterSubnetsNetEntry := ni.Subnets() + if len(clusterSubnetsNetEntry) == 0 { + return nil + } + if err := libovsdbutil.CreateDefaultRouteToExternal(e.nbClient, ni.GetNetworkScopedClusterRouterName(), + ni.GetNetworkScopedGWRouterName(node.Name), clusterSubnetsNetEntry); err != nil { + return fmt.Errorf("failed to create route to external for network %s: %v", ni.GetNetworkName(), err) + } + return nil + } + ni, err := e.nadController.GetNetwork(types.DefaultNetworkName) + if err != nil { + return fmt.Errorf("failed to get default network from NAD controller: %v", err) + } + if err = processNetworkFn(ni); err != nil { + return fmt.Errorf("failed to process default network: %v", err) + } + if !util.IsNetworkSegmentationSupportEnabled() { + return nil + } + if err = e.nadController.DoWithLock(processNetworkFn); err != nil { + return fmt.Errorf("failed to process route to external for all user defined networks: %v", err) } } } @@ -1302,46 +1975,96 @@ func (oc *DefaultNetworkController) addEgressNode(node *v1.Node) error { // egress node experiences problems we want to move all egress IP assignment // away from that node elsewhere so that the pods using the egress IP can // continue to do so without any issues. -func (oc *DefaultNetworkController) initClusterEgressPolicies(nodes []interface{}) error { - if err := InitClusterEgressPolicies(oc.nbClient, oc.addressSetFactory, oc.controllerName, oc.GetNetworkScopedClusterRouterName()); err != nil { - return err +func (e *EgressIPController) initClusterEgressPolicies(nodes []interface{}) error { + // Init default network + defaultNetInfo, err := e.nadController.GetNetwork(types.DefaultNetworkName) + if err != nil { + return fmt.Errorf("failed to get default network: %v", err) + } + subnets := util.GetAllClusterSubnetsFromEntries(defaultNetInfo.Subnets()) + if err := InitClusterEgressPolicies(e.nbClient, e.addressSetFactory, defaultNetInfo, subnets, e.controllerName); err != nil { + return fmt.Errorf("failed to initialize cluster logical router egress policies for the default network: %v", err) } for _, node := range nodes { node := node.(*kapi.Node) - - if err := DeleteLegacyDefaultNoRerouteNodePolicies(oc.nbClient, oc.GetNetworkScopedClusterRouterName(), node.Name); err != nil { - return err + if err := DeleteLegacyDefaultNoRerouteNodePolicies(e.nbClient, defaultNetInfo.GetNetworkScopedClusterRouterName(), node.Name); err != nil { + return fmt.Errorf("failed to delete legacy default no reroute node policies for node %s: %v", node.Name, err) } } - return nil + return e.nadController.DoWithLock(func(network util.NetInfo) error { + if network.GetNetworkName() == types.DefaultNetworkName { + return nil + } + subnets = util.GetAllClusterSubnetsFromEntries(network.Subnets()) + if err := InitClusterEgressPolicies(e.nbClient, e.addressSetFactory, network, subnets, e.controllerName); err != nil { + return fmt.Errorf("failed to initialize cluster logical router egress policies for network %s: %v", network.GetNetworkName(), err) + } + for _, node := range nodes { + node := node.(*kapi.Node) + if err := DeleteLegacyDefaultNoRerouteNodePolicies(e.nbClient, network.GetNetworkScopedClusterRouterName(), node.Name); err != nil { + return fmt.Errorf("failed to delete legacy default no reroute node policies for node %s and network %s:
%v", node.Name, network.GetNetworkName(), err) + } + } + return nil + }) } // InitClusterEgressPolicies creates the global no reroute policies and address-sets // required by the egressIP and egressServices features. -func InitClusterEgressPolicies(nbClient libovsdbclient.Client, addressSetFactory addressset.AddressSetFactory, - controllerName, clusterRouter string) error { - v4ClusterSubnet, v6ClusterSubnet := util.GetClusterSubnets() - if err := createDefaultNoReroutePodPolicies(nbClient, clusterRouter, v4ClusterSubnet, v6ClusterSubnet); err != nil { - return err +func InitClusterEgressPolicies(nbClient libovsdbclient.Client, addressSetFactory addressset.AddressSetFactory, ni util.NetInfo, + clusterSubnets []*net.IPNet, controllerName string) error { + var v4ClusterSubnet, v6ClusterSubnet []*net.IPNet + for _, subnet := range clusterSubnets { + if utilnet.IsIPv6CIDR(subnet) { + v6ClusterSubnet = append(v6ClusterSubnet, subnet) + } else { + v4ClusterSubnet = append(v4ClusterSubnet, subnet) + } } - if err := createDefaultNoRerouteServicePolicies(nbClient, clusterRouter, v4ClusterSubnet, v6ClusterSubnet); err != nil { - return err + var v4JoinSubnet, v6JoinSubnet *net.IPNet + var err error + if len(v4ClusterSubnet) > 0 { + if config.Gateway.V4JoinSubnet == "" { + return fmt.Errorf("network %s: cannot process IPv4 addresses because no IPv4 join subnet is available", ni.GetNetworkName()) + } + _, v4JoinSubnet, err = net.ParseCIDR(config.Gateway.V4JoinSubnet) + if err != nil { + return fmt.Errorf("network %s: failed to parse IPv4 join subnet: %v", ni.GetNetworkName(), err) + } } - if err := createDefaultNoRerouteReplyTrafficPolicy(nbClient, clusterRouter); err != nil { - return err + if len(v6ClusterSubnet) > 0 { + if config.Gateway.V6JoinSubnet == "" { + return fmt.Errorf("network %s: cannot process IPv6 addresses because no IPv6 join subnet is available", ni.GetNetworkName()) + } + _, v6JoinSubnet, err = net.ParseCIDR(config.Gateway.V6JoinSubnet) + if err != nil { + return fmt.Errorf("network %s: failed to parse IPv6 join subnet: %v", ni.GetNetworkName(), err) + } + } + router := ni.GetNetworkScopedClusterRouterName() + if err = createDefaultNoReroutePodPolicies(nbClient, ni.GetNetworkName(), controllerName, router, v4ClusterSubnet, v6ClusterSubnet); err != nil { + return fmt.Errorf("failed to create no reroute policies for pods on network %s: %v", ni.GetNetworkName(), err) + } + if err = createDefaultNoRerouteServicePolicies(nbClient, ni.GetNetworkName(), controllerName, router, v4ClusterSubnet, v6ClusterSubnet, + v4JoinSubnet, v6JoinSubnet); err != nil { + return fmt.Errorf("failed to create no reroute policies for services on network %s: %v", ni.GetNetworkName(), err) + } + if err = createDefaultNoRerouteReplyTrafficPolicy(nbClient, ni.GetNetworkName(), controllerName, router); err != nil { + return fmt.Errorf("failed to create no reroute reply traffic policy for network %s: %v", ni.GetNetworkName(), err) } // ensure the address-set for storing nodeIPs exists - dbIDs := getEgressIPAddrSetDbIDs(NodeIPAddrSetName, controllerName) - if _, err := addressSetFactory.EnsureAddressSet(dbIDs); err != nil { + // The address set with controller name 'default' is shared with all networks + dbIDs := getEgressIPAddrSetDbIDs(NodeIPAddrSetName, types.DefaultNetworkName, DefaultNetworkControllerName) + if _, err = addressSetFactory.EnsureAddressSet(dbIDs); err != nil { return fmt.Errorf("cannot ensure that addressSet %s exists %v", NodeIPAddrSetName, err) } // ensure the address-set for storing egressIP 
pods exists - dbIDs = getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, controllerName) - _, err := addressSetFactory.EnsureAddressSet(dbIDs) + dbIDs = getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, ni.GetNetworkName(), controllerName) + _, err = addressSetFactory.EnsureAddressSet(dbIDs) if err != nil { - return fmt.Errorf("cannot ensure that addressSet for egressIP pods %s exists %v", EgressIPServedPodsAddrSetName, err) + return fmt.Errorf("cannot ensure that addressSet for egressIP pods %s exists for network %s: %v", EgressIPServedPodsAddrSetName, ni.GetNetworkName(), err) } // ensure the address-set for storing egressservice pod backends exists @@ -1351,6 +2074,12 @@ func InitClusterEgressPolicies(nbClient libovsdbclient.Client, addressSetFactory return fmt.Errorf("cannot ensure that addressSet for egressService pods %s exists %v", egresssvc.EgressServiceServedPodsAddrSetName, err) } + if !ni.IsDefault() && util.IsNetworkSegmentationSupportEnabled() { + v4, v6 := len(v4ClusterSubnet) > 0, len(v6ClusterSubnet) > 0 + if err = ensureDefaultNoRerouteUDNEnabledSvcPolicies(nbClient, addressSetFactory, ni, controllerName, v4, v6); err != nil { + return fmt.Errorf("failed to ensure no reroute for UDN enabled services for network %s: %v", ni.GetNetworkName(), err) + } + } return nil } @@ -1385,6 +2114,11 @@ type podAssignmentState struct { // list of other egressIP object names that also match this pod but are on standby standbyEgressIPNames sets.Set[string] + + podIPs []net.IP + + // network attached to the pod + network util.NetInfo } // Clone deep-copies and returns the copied podAssignmentState @@ -1392,57 +2126,33 @@ func (pas *podAssignmentState) Clone() *podAssignmentState { clone := &podAssignmentState{ egressIPName: pas.egressIPName, standbyEgressIPNames: pas.standbyEgressIPNames.Clone(), + podIPs: make([]net.IP, 0, len(pas.podIPs)), + network: pas.network, } clone.egressStatuses = egressStatuses{make(map[egressipv1.EgressIPStatusItem]string, len(pas.egressStatuses.statusMap))} for k, v := range pas.statusMap { clone.statusMap[k] = v } + clone.podIPs = append(clone.podIPs, pas.podIPs...) return clone } -type egressIPZoneController struct { - // network information - util.NetInfo - - // podAssignmentMutex is used to ensure safe access to podAssignment. - // Currently WatchEgressIP, WatchEgressNamespace and WatchEgressPod could - // all access that map simultaneously, hence why this guard is needed. - podAssignmentMutex *sync.Mutex - // nodeUpdateMutex is used for two reasons: - // (1) to ensure safe handling of node ip address updates. VIP addresses are - // dynamic and might move across nodes. - // (2) used in ensureDefaultNoRerouteQoSRules function to ensure - // creating QoS rules is thread safe since otherwise when two nodes are added - // at the same time by two different threads we end up creating duplicate - // QoS rules in database due to libovsdb cache race - nodeUpdateMutex *sync.Mutex - // podAssignment is a cache used for keeping track of which egressIP status - // has been setup for each pod. 
The key is defined by getPodKey - podAssignment map[string]*podAssignmentState - // libovsdb northbound client interface - nbClient libovsdbclient.Client - // watchFactory watching k8s objects - watchFactory *factory.WatchFactory - // A cache that maintains all nodes in the cluster, - // value will be true if local to this zone and false otherwise - nodeZoneState *syncmap.SyncMap[bool] -} - // addStandByEgressIPAssignment does the same setup that is done by addPodEgressIPAssignments but for // the standby egressIP. This must always be called with a lock on podAssignmentState mutex // This is special case function called only from deleteEgressIPAssignments, don't use this for normal setup // Any failure from here will not be retried, its a corner case undefined behaviour -func (oc *DefaultNetworkController) addStandByEgressIPAssignment(podKey string, podStatus *podAssignmentState) error { +func (e *EgressIPController) addStandByEgressIPAssignment(ni util.NetInfo, podKey string, podStatus *podAssignmentState) error { podNamespace, podName := getPodNamespaceAndNameFromKey(podKey) - pod, err := oc.watchFactory.GetPod(podNamespace, podName) + pod, err := e.watchFactory.GetPod(podNamespace, podName) if err != nil { return err } eipsToAssign := podStatus.standbyEgressIPNames.UnsortedList() var eipToAssign string var eip *egressipv1.EgressIP + var mark util.EgressIPMark for _, eipName := range eipsToAssign { - eip, err = oc.watchFactory.GetEgressIP(eipName) + eip, err = e.watchFactory.GetEgressIP(eipName) if err != nil { klog.Warningf("There seems to be a stale standby egressIP %s for pod %s "+ "which doesn't exist: %v; removing this standby egressIP from cache...", eipName, podKey, err) @@ -1450,22 +2160,46 @@ func (oc *DefaultNetworkController) addStandByEgressIPAssignment(podKey string, continue } eipToAssign = eipName // use the first EIP we find successfully + if ni.IsSecondary() { + mark = getEgressIPPktMark(eip.Name, eip.Annotations) + } break } if eipToAssign == "" { klog.Infof("No standby egressIP's found for pod %s", podKey) return nil } - + // get IPs + nadName := ni.GetNetworkName() + if ni.IsSecondary() { + nadNames := ni.GetNADs() + if len(nadNames) == 0 { + return fmt.Errorf("expected at least one NAD name for Namespace %s", pod.Namespace) + } + nadName = nadNames[0] // there should only be one active network + } + podIPNets, err := e.getPodIPs(ni, pod, nadName) + if err != nil { + return fmt.Errorf("failed to get pod %s/%s IPs using nad name %q: %v", pod.Namespace, pod.Name, nadName, err) + } + if len(podIPNets) == 0 { + return fmt.Errorf("no IP(s) available for pod %s/%s on network %s", pod.Namespace, pod.Name, ni.GetNetworkName()) + } + podIPs := make([]net.IP, 0, len(podIPNets)) + for _, podIPNet := range podIPNets { + podIPs = append(podIPs, podIPNet.IP) + } podState := &podAssignmentState{ egressStatuses: egressStatuses{make(map[egressipv1.EgressIPStatusItem]string)}, standbyEgressIPNames: podStatus.standbyEgressIPNames, + podIPs: podIPs, + network: ni, } - oc.eIPC.podAssignment[podKey] = podState + e.podAssignment[podKey] = podState // NOTE: We let addPodEgressIPAssignments take care of setting egressIPName and egressStatuses and removing it from standBy - err = oc.addPodEgressIPAssignments(eipToAssign, eip.Status.Items, pod) + err = e.addPodEgressIPAssignments(ni, eipToAssign, eip.Status.Items, mark, pod) if err != nil { - return err + return fmt.Errorf("failed to add standby pod %s/%s for network %s: %v", pod.Namespace, pod.Name, ni.GetNetworkName(), err) } return nil } @@ 
-1474,7 +2208,8 @@ func (oc *DefaultNetworkController) addStandByEgressIPAssignment(podKey string, // (routing pod traffic to the egress node) and NAT objects on the egress node // (SNAT-ing to the egress IP). // This function should be called with lock on nodeZoneState cache key status.Node and pod.Spec.NodeName -func (e *egressIPZoneController) addPodEgressIPAssignment(egressIPName string, status egressipv1.EgressIPStatusItem, pod *kapi.Pod, podIPs []*net.IPNet) (err error) { +func (e *EgressIPController) addPodEgressIPAssignment(ni util.NetInfo, egressIPName string, status egressipv1.EgressIPStatusItem, mark util.EgressIPMark, + pod *kapi.Pod, podIPs []*net.IPNet) (err error) { if config.Metrics.EnableScaleMetrics { start := time.Now() defer func() { @@ -1498,7 +2233,7 @@ func (e *egressIPZoneController) addPodEgressIPAssignment(egressIPName string, s return fmt.Errorf("failed to get node %s egress IP config: %w", eNode.Name, err) } isOVNNetwork := util.IsOVNNetwork(parsedNodeEIPConfig, eIPIP) - nextHopIP, err := e.getNextHop(status.Node, status.EgressIP, egressIPName, isLocalZoneEgressNode, isOVNNetwork) + nextHopIP, err := e.getNextHop(ni, status.Node, status.EgressIP, egressIPName, isLocalZoneEgressNode, isOVNNetwork) if err != nil || nextHopIP == "" { return fmt.Errorf("failed to determine next hop for pod %s/%s when configuring egress IP %s"+ " IP %s: %v", pod.Namespace, pod.Name, egressIPName, status.EgressIP, err) @@ -1506,14 +2241,21 @@ func (e *egressIPZoneController) addPodEgressIPAssignment(egressIPName string, s var ops []ovsdb.Operation if loadedEgressNode && isLocalZoneEgressNode { if isOVNNetwork { - ops, err = e.createNATRuleOps(nil, podIPs, status, egressIPName) - if err != nil { - return fmt.Errorf("unable to create NAT rule ops for status: %v, err: %v", status, err) + if ni.IsDefault() { + ops, err = e.createNATRuleOps(ni, nil, podIPs, status, egressIPName, pod.Namespace, pod.Name) + if err != nil { + return fmt.Errorf("unable to create NAT rule ops for status: %v, err: %v", status, err) + } + } else if ni.IsSecondary() { + ops, err = e.createGWMarkPolicyOps(ni, ops, podIPs, status, mark, pod.Namespace, pod.Name, egressIPName) + if err != nil { + return fmt.Errorf("unable to create GW router LRP ops to packet mark pod %s/%s: %v", pod.Namespace, pod.Name, err) + } } } if config.OVNKubernetesFeature.EnableInterconnect && !isOVNNetwork && (loadedPodNode && !isLocalZonePod) { // configure reroute for non-local-zone pods on egress nodes - ops, err = e.createReroutePolicyOps(ops, podIPs, status, egressIPName, nextHopIP) + ops, err = e.createReroutePolicyOps(ni, ops, podIPs, status, mark, egressIPName, nextHopIP, pod.Namespace, pod.Name) if err != nil { return fmt.Errorf("unable to create logical router policy ops %v, err: %v", status, err) } @@ -1523,11 +2265,11 @@ func (e *egressIPZoneController) addPodEgressIPAssignment(egressIPName string, s // exec when node is local OR when pods are local // don't add a reroute policy if the egress node towards which we are adding this doesn't exist if loadedEgressNode && loadedPodNode && isLocalZonePod { - ops, err = e.createReroutePolicyOps(ops, podIPs, status, egressIPName, nextHopIP) + ops, err = e.createReroutePolicyOps(ni, ops, podIPs, status, mark, egressIPName, nextHopIP, pod.Namespace, pod.Name) if err != nil { return fmt.Errorf("unable to create logical router policy ops, err: %v", err) } - ops, err = e.deleteExternalGWPodSNATOps(ops, pod, podIPs, status, isOVNNetwork) + ops, err = e.deleteExternalGWPodSNATOps(ni, ops, pod, 
podIPs, status, isOVNNetwork) if err != nil { return err } @@ -1539,7 +2281,7 @@ func (e *egressIPZoneController) addPodEgressIPAssignment(egressIPName string, s // deletePodEgressIPAssignment deletes the OVN programmed egress IP // configuration mentioned for addPodEgressIPAssignment. // This function should be called with lock on nodeZoneState cache key status.Node and pod.Spec.NodeName -func (e *egressIPZoneController) deletePodEgressIPAssignment(egressIPName string, status egressipv1.EgressIPStatusItem, pod *kapi.Pod, podIPs []*net.IPNet) (err error) { +func (e *EgressIPController) deletePodEgressIPAssignment(ni util.NetInfo, egressIPName string, status egressipv1.EgressIPStatusItem, pod *kapi.Pod) (err error) { if config.Metrics.EnableScaleMetrics { start := time.Now() defer func() { @@ -1567,21 +2309,20 @@ func (e *egressIPZoneController) deletePodEgressIPAssignment(egressIPName string klog.Warningf("Unable to get node %s egress IP config: %v", eNode.Name, err) } else { isOVNNetwork = util.IsOVNNetwork(parsedEIPConfig, eIPIP) - nextHopIP, err = e.getNextHop(status.Node, status.EgressIP, egressIPName, isLocalZoneEgressNode, isOVNNetwork) + nextHopIP, err = e.getNextHop(ni, status.Node, status.EgressIP, egressIPName, isLocalZoneEgressNode, isOVNNetwork) if err != nil { klog.Warningf("Unable to determine next hop for egress IP %s IP %s assigned to node %s: %v", egressIPName, status.EgressIP, status.Node, err) } } } - var ops []ovsdb.Operation if !loadedPodNode || isLocalZonePod { // node is deleted (we can't determine zone so we always try and nuke OR pod is local to zone) - ops, err = e.addExternalGWPodSNATOps(nil, pod.Namespace, pod.Name, status) + ops, err = e.addExternalGWPodSNATOps(ni, nil, pod.Namespace, pod.Name, status) if err != nil { return err } - ops, err = e.deleteReroutePolicyOps(ops, podIPs, status, egressIPName, nextHopIP) + ops, err = e.deleteReroutePolicyOps(ni, ops, status, egressIPName, nextHopIP, pod.Namespace, pod.Name) if errors.Is(err, libovsdbclient.ErrNotFound) { // if the gateway router join IP setup is already gone, then don't count it as error. 
klog.Warningf("Unable to delete logical router policy, err: %v", err) @@ -1593,14 +2334,21 @@ func (e *egressIPZoneController) deletePodEgressIPAssignment(egressIPName string if loadedEgressNode && isLocalZoneEgressNode { if config.OVNKubernetesFeature.EnableInterconnect && !isOVNNetwork && (!loadedPodNode || !isLocalZonePod) { // node is deleted (we can't determine zone so we always try and nuke OR pod is remote to zone) // delete reroute for non-local-zone pods on egress nodes - ops, err = e.deleteReroutePolicyOps(ops, podIPs, status, egressIPName, nextHopIP) + ops, err = e.deleteReroutePolicyOps(ni, ops, status, egressIPName, nextHopIP, pod.Namespace, pod.Name) if err != nil { return fmt.Errorf("unable to delete logical router static route ops %v, err: %v", status, err) } } - ops, err = e.deleteNATRuleOps(ops, podIPs, status, egressIPName) - if err != nil { - return fmt.Errorf("unable to delete NAT rule for status: %v, err: %v", status, err) + if ni.IsDefault() { + ops, err = e.deleteNATRuleOps(ni, ops, status, egressIPName, pod.Namespace, pod.Name) + if err != nil { + return fmt.Errorf("unable to delete NAT rule for status: %v, err: %v", status, err) + } + } else if ni.IsSecondary() { + ops, err = e.deleteGWMarkPolicyOps(ni, ops, status, pod.Namespace, pod.Name, egressIPName) + if err != nil { + return fmt.Errorf("unable to create delete ops for GW router packet mark LRPs for pod %s/%s: %v", pod.Namespace, pod.Name, err) + } } } _, err = libovsdbops.TransactAndCheck(e.nbClient, ops) @@ -1618,8 +2366,8 @@ func (e *egressIPZoneController) deletePodEgressIPAssignment(egressIPName string // check the informer cache since on pod deletion the event handlers are // triggered after the update to the informer cache. We should not re-add the // external GW setup in those cases.
// This function should be called with lock on nodeZoneState cache key pod.Spec.NodeName -func (e *egressIPZoneController) addExternalGWPodSNATOps(ops []ovsdb.Operation, podNamespace, podName string, status egressipv1.EgressIPStatusItem) ([]ovsdb.Operation, error) { +func (e *EgressIPController) addExternalGWPodSNATOps(ni util.NetInfo, ops []ovsdb.Operation, podNamespace, podName string, status egressipv1.EgressIPStatusItem) ([]ovsdb.Operation, error) { if config.Gateway.DisableSNATMultipleGWs { pod, err := e.watchFactory.GetPod(podNamespace, podName) if err != nil { @@ -1660,7 +2408,7 @@ func (e *egressIPZoneController) addExternalGWPodSNATOps(ops []ovsdb.Operation, if err != nil { return nil, err } - ops, err = addOrUpdatePodSNATOps(e.nbClient, e.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, podIPs, ops) + ops, err = addOrUpdatePodSNATOps(e.nbClient, ni.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, podIPs, "", ops) if err != nil { return nil, err } @@ -1671,15 +2419,19 @@ func (e *egressIPZoneController) addExternalGWPodSNATOps(ops []ovsdb.Operation, } // deleteExternalGWPodSNATOps creates ops for the required external GW teardown for the given pod -func (e *egressIPZoneController) deleteExternalGWPodSNATOps(ops []ovsdb.Operation, pod *kapi.Pod, podIPs []*net.IPNet, status egressipv1.EgressIPStatusItem, isOVNNetwork bool) ([]ovsdb.Operation, error) { +func (e *EgressIPController) deleteExternalGWPodSNATOps(ni util.NetInfo, ops []ovsdb.Operation, pod *kapi.Pod, podIPs []*net.IPNet, status egressipv1.EgressIPStatusItem, isOVNNetwork bool) ([]ovsdb.Operation, error) { if config.Gateway.DisableSNATMultipleGWs && status.Node == pod.Spec.NodeName && isOVNNetwork { + affectedIPs := util.MatchAllIPNetFamily(utilnet.IsIPv6String(status.EgressIP), podIPs) + if len(affectedIPs) == 0 { + return nil, nil // nothing to do.
+		}
 		// remove snats to->nodeIP (from the node where pod exists if that node is also serving
 		// as an egress node for this pod) for these podIPs before adding the snat to->egressIP
 		extIPs, err := getExternalIPsGR(e.watchFactory, pod.Spec.NodeName)
 		if err != nil {
 			return nil, err
 		}
-		ops, err = deletePodSNATOps(e.nbClient, ops, e.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, podIPs)
+		ops, err = deletePodSNATOps(e.nbClient, ops, ni.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, affectedIPs, "")
 		if err != nil {
 			return nil, err
 		}
@@ -1690,8 +2442,8 @@ func (e *egressIPZoneController) deleteExternalGWPodSNATOps(ops []ovsdb.Operatio
 	return ops, nil
 }
 
-func (e *egressIPZoneController) getGatewayRouterJoinIP(node string, wantsIPv6 bool) (net.IP, error) {
-	gatewayIPs, err := libovsdbutil.GetLRPAddrs(e.nbClient, types.GWRouterToJoinSwitchPrefix+e.GetNetworkScopedGWRouterName(node))
+func (e *EgressIPController) getGatewayRouterJoinIP(ni util.NetInfo, node string, wantsIPv6 bool) (net.IP, error) {
+	gatewayIPs, err := libovsdbutil.GetLRPAddrs(e.nbClient, types.GWRouterToJoinSwitchPrefix+ni.GetNetworkScopedGWRouterName(node))
 	if err != nil {
 		return nil, fmt.Errorf("attempt at finding node gateway router network information failed, err: %w", err)
 	}
@@ -1710,7 +2462,7 @@ func ipFamilyName(isIPv6 bool) string {
 	return string(IPFamilyValueV4)
 }
 
-func (e *egressIPZoneController) getTransitIP(nodeName string, wantsIPv6 bool) (string, error) {
+func (e *EgressIPController) getTransitIP(nodeName string, wantsIPv6 bool) (string, error) {
 	// fetch node annotation of the egress node
 	node, err := e.watchFactory.GetNode(nodeName)
 	if err != nil {
@@ -1731,7 +2483,7 @@ func (e *egressIPZoneController) getTransitIP(nodeName string, wantsIPv6 bool) (
 // a secondary host network. If we failed to look up the information required to determine this, an error will be returned
 // however if we are able to lookup the information, but it doesnt exist, called must be able to tolerate a blank next hop
 // and no error returned. This means we searched successfully but could not find the information required to generate the next hop.
-func (e *egressIPZoneController) getNextHop(egressNodeName, egressIP, egressIPName string, isLocalZoneEgressNode, isOVNNetwork bool) (string, error) {
+func (e *EgressIPController) getNextHop(ni util.NetInfo, egressNodeName, egressIP, egressIPName string, isLocalZoneEgressNode, isOVNNetwork bool) (string, error) {
 	var nextHopIP string
 	var err error
 	isEgressIPv6 := utilnet.IsIPv6String(egressIP)
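The `getNextHop` changes below thread the network through next-hop resolution; one invariant worth keeping in mind is that the chosen next hop must share the egress IP's address family. A self-contained sketch of that family-matching rule (the addresses are made-up examples):

```go
package main

import (
	"fmt"

	utilnet "k8s.io/utils/net"
)

func main() {
	egressIP := "fd00:10:244::5" // an IPv6 egress IP needs an IPv6 next hop
	candidates := []string{"100.64.0.2", "fd00:100:64::2"}
	for _, nh := range candidates {
		// the same per-family test the controller effectively applies
		if utilnet.IsIPv6String(nh) == utilnet.IsIPv6String(egressIP) {
			fmt.Println("usable next hop:", nh)
		}
	}
}
```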
@@ -1739,7 +2491,7 @@ func (e *egressIPZoneController) getNextHop(egressNodeName, egressIP, egressIPNa
 	// is present in the nodeZoneState cache. Since we call it with lock on cache, we are safe here.
 	if isLocalZoneEgressNode {
 		if isOVNNetwork {
-			gatewayRouterIP, err := e.getGatewayRouterJoinIP(egressNodeName, isEgressIPv6)
+			gatewayRouterIP, err := e.getGatewayRouterJoinIP(ni, egressNodeName, isEgressIPv6)
 			if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) {
 				return "", fmt.Errorf("unable to retrieve gateway IP for node: %s, protocol is IPv6: %v, err: %w",
 					egressNodeName, isEgressIPv6, err)
@@ -1750,7 +2502,7 @@ func (e *egressIPZoneController) getNextHop(egressNodeName, egressIP, egressIPNa
 			}
 			nextHopIP = gatewayRouterIP.String()
 		} else {
-			mgmtPort := &nbdb.LogicalSwitchPort{Name: e.GetNetworkScopedK8sMgmtIntfName(egressNodeName)}
+			mgmtPort := &nbdb.LogicalSwitchPort{Name: ni.GetNetworkScopedK8sMgmtIntfName(egressNodeName)}
 			mgmtPort, err := libovsdbops.GetLogicalSwitchPort(e.nbClient, mgmtPort)
 			if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) {
 				return "", fmt.Errorf("failed to get next hop IP for secondary host network and egress IP %s for node %s "+
@@ -1800,27 +2552,34 @@ func (e *egressIPZoneController) getNextHop(egressNodeName, egressIP, egressIPNa
 // to redirect the pods to the appropriate management port or if interconnect is
 // enabled, the appropriate transit switch port.
 // This function should be called with lock on nodeZoneState cache key status.Node
-func (e *egressIPZoneController) createReroutePolicyOps(ops []ovsdb.Operation, podIPNets []*net.IPNet, status egressipv1.EgressIPStatusItem, egressIPName, nextHopIP string) ([]ovsdb.Operation, error) {
+func (e *EgressIPController) createReroutePolicyOps(ni util.NetInfo, ops []ovsdb.Operation, podIPNets []*net.IPNet, status egressipv1.EgressIPStatusItem,
+	mark util.EgressIPMark, egressIPName, nextHopIP, podNamespace, podName string) ([]ovsdb.Operation, error) {
 	isEgressIPv6 := utilnet.IsIPv6String(status.EgressIP)
+	ipFamily := getEIPIPFamily(isEgressIPv6)
+	options := make(map[string]string)
+	if ni.IsSecondary() {
+		if !mark.IsAvailable() {
+			return nil, fmt.Errorf("egressIP %s object must contain a mark for user defined networks", egressIPName)
+		}
+		addPktMarkToLRPOptions(options, mark.String())
+	}
 	var err error
+	dbIDs := getEgressIPLRPReRouteDbIDs(egressIPName, podNamespace, podName, ipFamily, ni.GetNetworkName(), e.controllerName)
+	p := libovsdbops.GetPredicate[*nbdb.LogicalRouterPolicy](dbIDs, nil)
 	// Handle all pod IPs that match the egress IP address family
 	for _, podIPNet := range util.MatchAllIPNetFamily(isEgressIPv6, podIPNets) {
-		lrp := nbdb.LogicalRouterPolicy{
-			Match:    fmt.Sprintf("%s.src == %s", ipFamilyName(isEgressIPv6), podIPNet.IP.String()),
-			Priority: types.EgressIPReroutePriority,
-			Nexthops: []string{nextHopIP},
-			Action:   nbdb.LogicalRouterPolicyActionReroute,
-			ExternalIDs: map[string]string{
-				"name": egressIPName,
-			},
-		}
-		p := func(item *nbdb.LogicalRouterPolicy) bool {
-			return item.Match == lrp.Match && item.Priority == lrp.Priority && item.ExternalIDs["name"] == lrp.ExternalIDs["name"]
-		}
-		ops, err = libovsdbops.CreateOrAddNextHopsToLogicalRouterPolicyWithPredicateOps(e.nbClient, ops, e.GetNetworkScopedClusterRouterName(), &lrp, p)
+		lrp := nbdb.LogicalRouterPolicy{
+			Match:       fmt.Sprintf("%s.src == %s", ipFamilyName(isEgressIPv6), podIPNet.IP.String()),
+			Priority:    types.EgressIPReroutePriority,
+			Nexthops:    []string{nextHopIP},
+			Action:      nbdb.LogicalRouterPolicyActionReroute,
+			ExternalIDs: dbIDs.GetExternalIDs(),
+			Options:     options,
+		}
+		ops, err = libovsdbops.CreateOrAddNextHopsToLogicalRouterPolicyWithPredicateOps(e.nbClient, ops, ni.GetNetworkScopedClusterRouterName(), &lrp, p)
 		if err != nil {
-			return nil, fmt.Errorf("error creating logical router policy %+v on router %s: %v", lrp, e.GetNetworkScopedClusterRouterName(), err)
+			return nil, fmt.Errorf("error creating logical router policy %+v on router %s: %v", lrp, ni.GetNetworkScopedClusterRouterName(), err)
 		}
 	}
 	return ops, nil
@@ -1836,35 +2595,102 @@ func (e *egressIPZoneController) createReroutePolicyOps(ops []ovsdb.Operation, p
 // if caller fails to find a next hop, we clear the LRPs for that specific Egress IP
 // which will break HA momentarily
 // This function should be called with lock on nodeZoneState cache key status.Node
-func (e *egressIPZoneController) deleteReroutePolicyOps(ops []ovsdb.Operation, podIPNets []*net.IPNet, status egressipv1.EgressIPStatusItem, egressIPName, nextHopIP string) ([]ovsdb.Operation, error) {
+func (e *EgressIPController) deleteReroutePolicyOps(ni util.NetInfo, ops []ovsdb.Operation, status egressipv1.EgressIPStatusItem,
+	egressIPName, nextHopIP, podNamespace, podName string) ([]ovsdb.Operation, error) {
 	isEgressIPv6 := utilnet.IsIPv6String(status.EgressIP)
+	ipFamily := getEIPIPFamily(isEgressIPv6)
 	var err error
 	// Handle all pod IPs that match the egress IP address family
-	for _, podIPNet := range util.MatchAllIPNetFamily(isEgressIPv6, podIPNets) {
-		filterOption := fmt.Sprintf("%s.src == %s", ipFamilyName(isEgressIPv6), podIPNet.IP.String())
-		p := func(item *nbdb.LogicalRouterPolicy) bool {
-			return item.Match == filterOption && item.Priority == types.EgressIPReroutePriority && item.ExternalIDs["name"] == egressIPName
+	dbIDs := getEgressIPLRPReRouteDbIDs(egressIPName, podNamespace, podName, ipFamily, ni.GetNetworkName(), e.controllerName)
+	p := libovsdbops.GetPredicate[*nbdb.LogicalRouterPolicy](dbIDs, nil)
+	if nextHopIP != "" {
+		ops, err = libovsdbops.DeleteNextHopFromLogicalRouterPoliciesWithPredicateOps(e.nbClient, ops, ni.GetNetworkScopedClusterRouterName(), p, nextHopIP)
+		if err != nil {
+			return nil, fmt.Errorf("error removing nexthop IP %s from egress ip %s policies on router %s: %v",
+				nextHopIP, egressIPName, ni.GetNetworkScopedClusterRouterName(), err)
 		}
-		if nextHopIP != "" {
-			ops, err = libovsdbops.DeleteNextHopFromLogicalRouterPoliciesWithPredicateOps(e.nbClient, ops, e.GetNetworkScopedClusterRouterName(), p, nextHopIP)
-			if err != nil {
-				return nil, fmt.Errorf("error removing nexthop IP %s from egress ip %s policies on router %s: %v",
-					nextHopIP, egressIPName, e.GetNetworkScopedClusterRouterName(), err)
-			}
-		} else {
-			klog.Errorf("Caller failed to pass next hop for EgressIP %s and IP %s. Deleting all LRPs. This will break HA momentarily",
-				egressIPName, status.EgressIP)
-			// since next hop was not found, delete everything to ensure no stale entries however this will break load
-			// balancing between hops, but we offer no guarantees except one of the EIPs will work
-			ops, err = libovsdbops.DeleteLogicalRouterPolicyWithPredicateOps(e.nbClient, ops, e.GetNetworkScopedClusterRouterName(), p)
-			if err != nil {
-				return nil, fmt.Errorf("failed to create logical router policy operations on ovn_cluster_router: %v", err)
-			}
+	} else {
+		klog.Errorf("Caller failed to pass next hop for EgressIP %s and IP %s. Deleting all LRPs. This will break HA momentarily",
+			egressIPName, status.EgressIP)
+		// since next hop was not found, delete everything to ensure no stale entries however this will break load
+		// balancing between hops, but we offer no guarantees except one of the EIPs will work
+		ops, err = libovsdbops.DeleteLogicalRouterPolicyWithPredicateOps(e.nbClient, ops, ni.GetNetworkScopedClusterRouterName(), p)
+		if err != nil {
+			return nil, fmt.Errorf("failed to create logical router policy delete operations on %s: %v", ni.GetNetworkScopedClusterRouterName(), err)
 		}
 	}
 	return ops, nil
 }
 
+func (e *EgressIPController) createGWMarkPolicyOps(ni util.NetInfo, ops []ovsdb.Operation, podIPNets []*net.IPNet, status egressipv1.EgressIPStatusItem,
+	mark util.EgressIPMark, podNamespace, podName, egressIPName string) ([]ovsdb.Operation, error) {
+	isEgressIPv6 := utilnet.IsIPv6String(status.EgressIP)
+	routerName := ni.GetNetworkScopedGWRouterName(status.Node)
+	options := make(map[string]string)
+	if !mark.IsAvailable() {
+		return nil, fmt.Errorf("egressIP object must contain a mark for user defined networks")
+	}
+	addPktMarkToLRPOptions(options, mark.String())
+	var err error
+	ovnIPFamilyName := ipFamilyName(isEgressIPv6)
+	ipFamilyValue := getEIPIPFamily(isEgressIPv6)
+	dbIDs := getEgressIPLRPSNATMarkDbIDs(egressIPName, podNamespace, podName, ipFamilyValue, ni.GetNetworkName(), e.controllerName)
+	// Handle all pod IPs that match the egress IP address family
+	for _, podIPNet := range util.MatchAllIPNetFamily(isEgressIPv6, podIPNets) {
+		lrp := nbdb.LogicalRouterPolicy{
+			Match:       fmt.Sprintf("%s.src == %s && pkt.mark == 0", ovnIPFamilyName, podIPNet.IP.String()), // only add a pkt mark if one doesn't already exist
+			Priority:    types.EgressIPSNATMarkPriority,
+			Action:      nbdb.LogicalRouterPolicyActionAllow,
+			ExternalIDs: dbIDs.GetExternalIDs(),
+			Options:     options,
+		}
+		p := libovsdbops.GetPredicate[*nbdb.LogicalRouterPolicy](dbIDs, nil)
+		ops, err = libovsdbops.CreateOrUpdateLogicalRouterPolicyWithPredicateOps(e.nbClient, ops, routerName, &lrp, p)
+		if err != nil {
+			return nil, fmt.Errorf("error creating logical router policy %+v create/update ops for packet marking on router %s: %v", lrp, routerName, err)
+		}
+	}
+	return ops, nil
+}
+
+func (e *EgressIPController) deleteGWMarkPolicyOps(ni util.NetInfo, ops []ovsdb.Operation, status egressipv1.EgressIPStatusItem,
+	podNamespace, podName, egressIPName string) ([]ovsdb.Operation, error) {
+	isEgressIPv6 := utilnet.IsIPv6String(status.EgressIP)
+	routerName := ni.GetNetworkScopedGWRouterName(status.Node)
+	ipFamilyValue := getEIPIPFamily(isEgressIPv6)
+	dbIDs := getEgressIPLRPSNATMarkDbIDs(egressIPName, podNamespace, podName, ipFamilyValue, ni.GetNetworkName(), e.controllerName)
+	p := libovsdbops.GetPredicate[*nbdb.LogicalRouterPolicy](dbIDs, nil)
+	var err error
+	ops, err = libovsdbops.DeleteLogicalRouterPolicyWithPredicateOps(e.nbClient, ops, routerName, p)
+	if err != nil {
+		return nil, fmt.Errorf("error creating logical router policy delete ops for packet marking on router %s: %v", routerName, err)
+	}
+	return ops, nil
+}
+
+func (e *EgressIPController) deleteGWMarkPolicyForStatusOps(ni util.NetInfo, ops []ovsdb.Operation, status egressipv1.EgressIPStatusItem,
+	egressIPName string) ([]ovsdb.Operation, error) {
+	isEgressIPv6 := utilnet.IsIPv6String(status.EgressIP)
+	routerName := ni.GetNetworkScopedGWRouterName(status.Node)
+	ipFamilyValue := getEIPIPFamily(isEgressIPv6)
+	predicateIDs := libovsdbops.NewDbObjectIDs(libovsdbops.LogicalRouterPolicyEgressIP, e.controllerName,
+		map[libovsdbops.ExternalIDKey]string{
+			libovsdbops.PriorityKey: fmt.Sprintf("%d", types.EgressIPSNATMarkPriority),
+			libovsdbops.IPFamilyKey: string(ipFamilyValue),
+			libovsdbops.NetworkKey:  ni.GetNetworkName(),
+		})
+	lrpExtIDPredicate := libovsdbops.GetPredicate[*nbdb.LogicalRouterPolicy](predicateIDs, nil)
+	p := func(item *nbdb.LogicalRouterPolicy) bool {
+		return lrpExtIDPredicate(item) && strings.HasPrefix(item.ExternalIDs[libovsdbops.ObjectNameKey.String()], egressIPName+dbIDEIPNamePodDivider)
	}
+	var err error
+	ops, err = libovsdbops.DeleteLogicalRouterPolicyWithPredicateOps(e.nbClient, ops, routerName, p)
+	if err != nil {
+		return nil, fmt.Errorf("error creating logical router policy delete ops for packet marking on router %s: %v", routerName, err)
+	}
+	return ops, nil
+}
+
 // deleteEgressIPStatusSetup deletes the entire set up in the NB DB for an
 // EgressIPStatusItem. The set up in the NB DB gets tagged with the name of the
 // EgressIP, hence lookup the LRP and NAT objects which match that as well as
@@ -1872,90 +2698,94 @@ func (e *egressIPZoneController) deleteReroutePolicyOps(ops []ovsdb.Operation, p
 // completely deleted once the remaining and last nexthop equals the
 // gatewayRouterIP corresponding to the node in the EgressIPStatusItem, else
 // just remove the gatewayRouterIP from the list of nexthops
-// It also returns the list of podIPs whose routes and SNAT's were deleted
 // This function should be called with a lock on e.nodeZoneState.status.Node
-func (e *egressIPZoneController) deleteEgressIPStatusSetup(name string, status egressipv1.EgressIPStatusItem) ([]net.IP, error) {
+func (e *EgressIPController) deleteEgressIPStatusSetup(ni util.NetInfo, name string, status egressipv1.EgressIPStatusItem) error {
 	var err error
 	var ops []ovsdb.Operation
-	nextHopIP, err := e.attemptToGetNextHopIP(name, status)
+	nextHopIP, err := e.attemptToGetNextHopIP(ni, name, status)
 	if err != nil {
-		return nil, fmt.Errorf("failed to delete egress IP %s (%s) because unable to determine next hop: %v",
+		return fmt.Errorf("failed to delete egress IP %s (%s) because unable to determine next hop: %v",
 			name, status.EgressIP, err)
 	}
-
-	if nextHopIP != "" {
-		policyPred := func(item *nbdb.LogicalRouterPolicy) bool {
-			hasIPNexthop := false
-			for _, nexthop := range item.Nexthops {
-				if nexthop == nextHopIP {
-					hasIPNexthop = true
-					break
-				}
+	isIPv6 := utilnet.IsIPv6String(status.EgressIP)
+	policyPredNextHop := func(item *nbdb.LogicalRouterPolicy) bool {
+		hasIPNexthop := false
+		for _, nexthop := range item.Nexthops {
+			if nexthop == nextHopIP {
+				hasIPNexthop = true
+				break
 			}
-			return item.Priority == types.EgressIPReroutePriority && item.ExternalIDs["name"] == name && hasIPNexthop
 		}
-		ops, err = libovsdbops.DeleteNextHopFromLogicalRouterPoliciesWithPredicateOps(e.nbClient, ops, e.GetNetworkScopedClusterRouterName(), policyPred, nextHopIP)
+		return item.Priority == types.EgressIPReroutePriority && hasIPNexthop &&
+			item.ExternalIDs[libovsdbops.NetworkKey.String()] == ni.GetNetworkName() &&
+			item.ExternalIDs[libovsdbops.OwnerControllerKey.String()] == e.controllerName &&
+			item.ExternalIDs[libovsdbops.OwnerTypeKey.String()] == string(libovsdbops.EgressIPOwnerType) &&
+			item.ExternalIDs[libovsdbops.IPFamilyKey.String()] == string(getEIPIPFamily(isIPv6)) &&
+			strings.HasPrefix(item.ExternalIDs[libovsdbops.ObjectNameKey.String()], name+dbIDEIPNamePodDivider)
+	}
+
+	if nextHopIP != "" {
+		ops, err = libovsdbops.DeleteNextHopFromLogicalRouterPoliciesWithPredicateOps(e.nbClient, ops, ni.GetNetworkScopedClusterRouterName(), policyPredNextHop, nextHopIP)
 		if err != nil {
-			return nil, fmt.Errorf("error removing nexthop IP %s from egress ip %s policies on router %s: %v",
-				nextHopIP, name, e.GetNetworkScopedClusterRouterName(), err)
+			return fmt.Errorf("error removing nexthop IP %s from egress ip %s policies on router %s: %v",
+				nextHopIP, name, ni.GetNetworkScopedClusterRouterName(), err)
 		}
-	} else if ops, err = e.ensureOnlyValidNextHops(name, ops); err != nil {
-		return nil, err
+	} else if ops, err = e.ensureOnlyValidNextHops(ni, name, ops); err != nil {
+		return err
 	}
 
 	isLocalZoneEgressNode, loadedEgressNode := e.nodeZoneState.Load(status.Node)
-	var nats []*nbdb.NAT
 	if loadedEgressNode && isLocalZoneEgressNode {
-		routerName := e.GetNetworkScopedGWRouterName(status.Node)
-		natPred := func(nat *nbdb.NAT) bool {
-			// We should delete NATs only from the status.Node that was passed into this function
-			return nat.ExternalIDs["name"] == name && nat.ExternalIP == status.EgressIP && nat.LogicalPort != nil && *nat.LogicalPort == e.GetNetworkScopedK8sMgmtIntfName(status.Node)
-		}
-		nats, err = libovsdbops.FindNATsWithPredicate(e.nbClient, natPred) // save the nats to get the podIPs before that nats get deleted
-		if err != nil {
-			return nil, fmt.Errorf("error removing egress ip pods from adress set %s: %v", EgressIPServedPodsAddrSetName, err)
-		}
-		ops, err = libovsdbops.DeleteNATsWithPredicateOps(e.nbClient, ops, natPred)
-		if err != nil {
-			return nil, fmt.Errorf("error removing egress ip %s nats on router %s: %v", name, routerName, err)
+		if ni.IsDefault() {
+			routerName := ni.GetNetworkScopedGWRouterName(status.Node)
+			natPred := func(nat *nbdb.NAT) bool {
+				// We should delete NATs only from the status.Node that was passed into this function
+				return strings.HasPrefix(nat.ExternalIDs[libovsdbops.ObjectNameKey.String()], name+dbIDEIPNamePodDivider) &&
+					nat.ExternalIP == status.EgressIP && nat.LogicalPort != nil &&
					*nat.LogicalPort == ni.GetNetworkScopedK8sMgmtIntfName(status.Node)
+			}
+			ops, err = libovsdbops.DeleteNATsWithPredicateOps(e.nbClient, ops, natPred)
+			if err != nil {
+				return fmt.Errorf("error removing egress ip %s nats on router %s: %v", name, routerName, err)
+			}
+		} else if ni.IsSecondary() {
+			if ops, err = e.deleteGWMarkPolicyForStatusOps(ni, ops, status, name); err != nil {
+				return fmt.Errorf("failed to delete gateway mark policy: %v", err)
+			}
		}
	}
+
	_, err = libovsdbops.TransactAndCheck(e.nbClient, ops)
	if err != nil {
-		return nil, fmt.Errorf("error transacting ops %+v: %v", ops, err)
+		return fmt.Errorf("error transacting ops %+v: %v", ops, err)
	}
-	var podIPs []net.IP
-	for i := range nats {
-		nat := nats[i]
-		podIP := net.ParseIP(nat.LogicalIP)
-		podIPs = append(podIPs, podIP)
-	}
-
-	return podIPs, nil
+	return nil
 }
 
-func (e *egressIPZoneController) ensureOnlyValidNextHops(name string, ops []libovsdb.Operation) ([]libovsdb.Operation, error) {
+func (e *EgressIPController) ensureOnlyValidNextHops(ni util.NetInfo, name string, ops []libovsdb.Operation) ([]libovsdb.Operation, error) {
 	// When no nextHopIP is found, This may happen when node object is already deleted.
 	// So compare validNextHopIPs associated with current eIP.Status and Nexthops present
 	// in the LogicalRouterPolicy, then delete nexthop(s) from LogicalRouterPolicy if
 	// it doesn't match with nexthops derived from eIP.Status.
 	policyPred := func(item *nbdb.LogicalRouterPolicy) bool {
-		return item.Priority == types.EgressIPReroutePriority && item.ExternalIDs["name"] == name
+		return item.Priority == types.EgressIPReroutePriority &&
+			strings.HasPrefix(item.ExternalIDs[libovsdbops.ObjectNameKey.String()], name+dbIDEIPNamePodDivider) &&
+			item.ExternalIDs[libovsdbops.NetworkKey.String()] == ni.GetNetworkName()
 	}
 	eIP, err := e.watchFactory.GetEgressIP(name)
 	if err != nil && !apierrors.IsNotFound(err) {
 		return ops, fmt.Errorf("error retrieving EgressIP %s object for updating logical router policy nexthops, err: %w", name, err)
 	} else if err != nil && apierrors.IsNotFound(err) {
 		// EgressIP object is not found, so delete LRP associated with it.
-		ops, err = libovsdbops.DeleteLogicalRouterPolicyWithPredicateOps(e.nbClient, ops, e.GetNetworkScopedClusterRouterName(), policyPred)
+		ops, err = libovsdbops.DeleteLogicalRouterPolicyWithPredicateOps(e.nbClient, ops, ni.GetNetworkScopedClusterRouterName(), policyPred)
 		if err != nil {
 			return ops, fmt.Errorf("error creating ops to remove logical router policy for EgressIP %s from router %s: %v",
-				name, e.GetNetworkScopedClusterRouterName(), err)
+				name, ni.GetNetworkScopedClusterRouterName(), err)
 		}
 	} else {
 		validNextHopIPs := make(sets.Set[string])
 		for _, validStatus := range eIP.Status.Items {
-			nextHopIP, err := e.attemptToGetNextHopIP(name, validStatus)
+			nextHopIP, err := e.attemptToGetNextHopIP(ni, name, validStatus)
 			if err != nil {
 				return ops, fmt.Errorf("failed to delete EgressIP %s (%s) because unable to determine next hop: %v",
 					name, validStatus.EgressIP, err)
@@ -1968,10 +2798,10 @@ func (e *egressIPZoneController) ensureOnlyValidNextHops(name string, ops []libo
 			return ops, fmt.Errorf("error finding logical router policy for EgressIP %s: %v", name, err)
 		}
 		if len(validNextHopIPs) == 0 {
-			ops, err = libovsdbops.DeleteLogicalRouterPoliciesOps(e.nbClient, ops, e.GetNetworkScopedClusterRouterName(), reRoutePolicies...)
+			ops, err = libovsdbops.DeleteLogicalRouterPoliciesOps(e.nbClient, ops, ni.GetNetworkScopedClusterRouterName(), reRoutePolicies...)
 			if err != nil {
 				return ops, fmt.Errorf("error creating ops to remove logical router policy for EgressIP %s from router %s: %v",
-					name, e.GetNetworkScopedClusterRouterName(), err)
+					name, ni.GetNetworkScopedClusterRouterName(), err)
 			}
 			return ops, nil
 		}
@@ -1980,10 +2810,10 @@ func (e *egressIPZoneController) ensureOnlyValidNextHops(name string, ops []libo
 				if validNextHopIPs.Has(nextHop) {
 					continue
 				}
-				ops, err = libovsdbops.DeleteNextHopsFromLogicalRouterPolicyOps(e.nbClient, ops, e.GetNetworkScopedClusterRouterName(), []*nbdb.LogicalRouterPolicy{policy}, nextHop)
+				ops, err = libovsdbops.DeleteNextHopsFromLogicalRouterPolicyOps(e.nbClient, ops, ni.GetNetworkScopedClusterRouterName(), []*nbdb.LogicalRouterPolicy{policy}, nextHop)
 				if err != nil {
 					return ops, fmt.Errorf("error creating ops to remove stale next hop IP %s from logical router policy for EgressIP %s from router %s: %v",
-						nextHop, name, e.GetNetworkScopedClusterRouterName(), err)
+						nextHop, name, ni.GetNetworkScopedClusterRouterName(), err)
 				}
 			}
 		}
@@ -1997,13 +2827,13 @@ func (e *egressIPZoneController) ensureOnlyValidNextHops(name string, ops []libo
 // 2) When EgressIP belongs to OVN network and node is local, then it must return node's gateway router IP address.
 // 3) When EgressIP belongs to non OVN network and node is local, then it must return node's management port IP address.
 // 4) When EgressIP belongs to remote node in interconnect zone, then it return node's transit switch IP address.
-func (e *egressIPZoneController) attemptToGetNextHopIP(name string, status egressipv1.EgressIPStatusItem) (string, error) {
-	isLocalZoneEgressNode, _ := e.nodeZoneState.Load(status.Node)
-	eNode, err := e.watchFactory.GetNode(status.Node)
+func (n *EgressIPController) attemptToGetNextHopIP(ni util.NetInfo, name string, status egressipv1.EgressIPStatusItem) (string, error) {
+	isLocalZoneEgressNode, _ := n.nodeZoneState.Load(status.Node)
+	eNode, err := n.watchFactory.GetNode(status.Node)
 	if err != nil && !apierrors.IsNotFound(err) {
 		return "", fmt.Errorf("unable to get node for Egress IP %s: %v", status.EgressIP, err)
 	} else if err != nil {
-		klog.Errorf("Node is not found for Egress IP %s", status.EgressIP)
+		klog.Errorf("Node %s is not found for Egress IP %s", status.Node, status.EgressIP)
 		return "", nil
 	}
 	var nextHopIP string
@@ -2012,7 +2842,7 @@ func (e *egressIPZoneController) attemptToGetNextHopIP(name string, status egres
 		klog.Warningf("Failed to get Egress IP config from node annotation %s: %v", status.Node, err)
 	} else {
 		isOVNNetwork := util.IsOVNNetwork(eIPConfig, eIPIP)
-		nextHopIP, err = e.getNextHop(status.Node, status.EgressIP, name, isLocalZoneEgressNode, isOVNNetwork)
+		nextHopIP, err = n.getNextHop(ni, status.Node, status.EgressIP, name, isLocalZoneEgressNode, isOVNNetwork)
 		if err != nil {
 			return "", err
 		}
@@ -2020,9 +2850,9 @@ func (e *egressIPZoneController) attemptToGetNextHopIP(name string, status egres
 	return nextHopIP, nil
 }
 
-func (oc *DefaultNetworkController) addPodIPsToAddressSet(addrSetIPs []net.IP) error {
-	dbIDs := getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, oc.controllerName)
-	as, err := oc.addressSetFactory.GetAddressSet(dbIDs)
+func (e *EgressIPController) addPodIPsToAddressSet(network string, controller string, addrSetIPs ...net.IP) error {
+	dbIDs := getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, network, controller)
+	as, err := e.addressSetFactory.GetAddressSet(dbIDs)
 	if err != nil {
 		return fmt.Errorf("cannot ensure that addressSet %s exists %v", EgressIPServedPodsAddrSetName, err)
 	}
@@ -2032,9 +2862,9 @@ func (oc *DefaultNetworkController) addPodIPsToAddressSet(addrSetIPs []net.IP) e
 	return nil
 }
 
-func (oc *DefaultNetworkController) deletePodIPsFromAddressSet(addrSetIPs []net.IP) error {
-	dbIDs := getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, oc.controllerName)
-	as, err := oc.addressSetFactory.GetAddressSet(dbIDs)
+func (e *EgressIPController) deletePodIPsFromAddressSet(network string, controller string, addrSetIPs ...net.IP) error {
+	dbIDs := getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, network, controller)
+	as, err := e.addressSetFactory.GetAddressSet(dbIDs)
 	if err != nil {
 		return fmt.Errorf("cannot ensure that addressSet %s exists %v", EgressIPServedPodsAddrSetName, err)
 	}
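The next hunk rewrites the no-reroute service policies so the join subnet comes from the network rather than the global config. The policies themselves are plain allow LRPs whose match pairs a cluster subnet with a join subnet; a small sketch of how such a match string is assembled (both subnets are illustrative):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	_, clusterSubnet, _ := net.ParseCIDR("10.244.0.0/16")
	_, joinSubnet, _ := net.ParseCIDR("100.64.0.0/16")
	// mirrors the shape of the v4 match built in createDefaultNoRerouteServicePolicies
	match := fmt.Sprintf("ip4.src == %s && ip4.dst == %s", clusterSubnet, joinSubnet)
	fmt.Println(match) // ip4.src == 10.244.0.0/16 && ip4.dst == 100.64.0.0/16
}
```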
@@ -2046,29 +2876,80 @@
 // createDefaultNoRerouteServicePolicies ensures service reachability from the
 // host network to any service (except ETP=local) backed by egress IP matching pods
-func createDefaultNoRerouteServicePolicies(nbClient libovsdbclient.Client, clusterRouter string, v4ClusterSubnet, v6ClusterSubnet []*net.IPNet) error {
+func createDefaultNoRerouteServicePolicies(nbClient libovsdbclient.Client, network, controller, clusterRouter string,
+	v4ClusterSubnet, v6ClusterSubnet []*net.IPNet, v4JoinSubnet, v6JoinSubnet *net.IPNet) error {
 	for _, v4Subnet := range v4ClusterSubnet {
-		match := fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Subnet.String(), config.Gateway.V4JoinSubnet)
-		if err := createLogicalRouterPolicy(nbClient, clusterRouter, match, types.DefaultNoRereoutePriority, nil, nil); err != nil {
+		if v4JoinSubnet == nil {
+			klog.Errorf("Creating no reroute services requires an IPv4 join subnet, but none was found")
+			continue
+		}
+		dbIDs := getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, network, controller)
+		match := fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Subnet.String(), v4JoinSubnet.String())
+		if err := createLogicalRouterPolicy(nbClient, clusterRouter, match, types.DefaultNoRereoutePriority, nil, dbIDs); err != nil {
 			return fmt.Errorf("unable to create IPv4 no-reroute service policies, err: %v", err)
 		}
 	}
 	for _, v6Subnet := range v6ClusterSubnet {
-		match := fmt.Sprintf("ip6.src == %s && ip6.dst == %s", v6Subnet.String(), config.Gateway.V6JoinSubnet)
-		if err := createLogicalRouterPolicy(nbClient, clusterRouter, match, types.DefaultNoRereoutePriority, nil, nil); err != nil {
+		if v6JoinSubnet == nil {
+			klog.Errorf("Creating no reroute services requires an IPv6 join subnet, but none was found")
+			continue
+		}
+		dbIDs := getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV6, network, controller)
+		match := fmt.Sprintf("ip6.src == %s && ip6.dst == %s", v6Subnet.String(), v6JoinSubnet.String())
+		if err := createLogicalRouterPolicy(nbClient, clusterRouter, match, types.DefaultNoRereoutePriority, nil, dbIDs); err != nil {
 			return fmt.Errorf("unable to create IPv6 no-reroute service policies, err: %v", err)
 		}
 	}
 	return nil
 }
 
+func (e *EgressIPController) ensureL3ClusterRouterPoliciesForNetwork(ni util.NetInfo) error {
+	e.nodeUpdateMutex.Lock()
+	defer e.nodeUpdateMutex.Unlock()
+	subnets := util.GetAllClusterSubnetsFromEntries(ni.Subnets())
+	if err := InitClusterEgressPolicies(e.nbClient, e.addressSetFactory, ni, subnets, e.controllerName); err != nil {
+		return fmt.Errorf("failed to initialize cluster logical router egress policies for network %s: %v", ni.GetNetworkName(), err)
+	}
+	err := ensureDefaultNoRerouteNodePolicies(e.nbClient, e.addressSetFactory, ni.GetNetworkName(), ni.GetNetworkScopedClusterRouterName(),
+		e.controllerName, listers.NewNodeLister(e.watchFactory.NodeInformer().GetIndexer()), e.v4, e.v6)
+	if err != nil {
+		return fmt.Errorf("failed to ensure no reroute node policies for network %s: %v", ni.GetNetworkName(), err)
+	}
+	if !config.OVNKubernetesFeature.EnableInterconnect {
+		return nil
+	}
+	nodes := e.nodeZoneState.GetKeys()
+	for _, node := range nodes {
+		if isLocal, ok := e.nodeZoneState.Load(node); ok && isLocal {
+			if err := libovsdbutil.CreateDefaultRouteToExternal(e.nbClient, ni.GetNetworkScopedClusterRouterName(),
+				ni.GetNetworkScopedGWRouterName(node), ni.Subnets()); err != nil {
+				return fmt.Errorf("failed to create route to external for network %s: %v", ni.GetNetworkName(), err)
+			}
+		}
+	}
+	return nil
+}
+
+func (e *EgressIPController) ensureL3SwitchPoliciesForNode(ni util.NetInfo, nodeName string) error {
+	e.nodeUpdateMutex.Lock()
+	defer e.nodeUpdateMutex.Unlock()
+	ops, err := e.ensureDefaultNoReRouteQosRulesForNode(ni, nodeName, nil)
+	if err != nil {
+		return fmt.Errorf("failed to ensure no reroute QoS rules for node %s and network %s: %v", nodeName, ni.GetNetworkName(), err)
+	}
+	if _, err = libovsdbops.TransactAndCheck(e.nbClient, ops); err != nil {
+		return fmt.Errorf("unable to ensure default no reroute QoS rules for network %s, err: %v", ni.GetNetworkName(), err)
+	}
+	return nil
+}
+
 // createDefaultNoRerouteReplyTrafficPolicies ensures any traffic which is a response/reply from the egressIP pods
 // will not be re-routed to egress-nodes. This ensures EIP can work well with ETP=local
 // this policy is ipFamily neutral
-func createDefaultNoRerouteReplyTrafficPolicy(nbClient libovsdbclient.Client, clusterRouter string) error {
+func createDefaultNoRerouteReplyTrafficPolicy(nbClient libovsdbclient.Client, network, controller, clusterRouter string) error {
 	match := fmt.Sprintf("pkt.mark == %d", types.EgressIPReplyTrafficConnectionMark)
-	externalIDs := getEgressIPLRPNoReRouteDbIDs(types.DefaultNoRereoutePriority, ReplyTrafficNoReroute, IPFamilyValue).GetExternalIDs()
-	if err := createLogicalRouterPolicy(nbClient, clusterRouter, match, types.DefaultNoRereoutePriority, externalIDs, nil); err != nil {
+	dbIDs := getEgressIPLRPNoReRouteDbIDs(types.DefaultNoRereoutePriority, ReplyTrafficNoReroute, IPFamilyValue, network, controller)
+	if err := createLogicalRouterPolicy(nbClient, clusterRouter, match, types.DefaultNoRereoutePriority, nil, dbIDs); err != nil {
 		return fmt.Errorf("unable to create no-reroute reply traffic policies, err: %v", err)
 	}
 	return nil
@@ -2076,16 +2957,18 @@ func createDefaultNoRerouteReplyTrafficPolicy(nbClient libovsdbclient.Client, cl
 
 // createDefaultNoReroutePodPolicies ensures egress pods east<->west traffic with regular pods,
 // i.e: ensuring that an egress pod can still communicate with a regular pod / service backed by regular pods
-func createDefaultNoReroutePodPolicies(nbClient libovsdbclient.Client, clusterRouter string, v4ClusterSubnet, v6ClusterSubnet []*net.IPNet) error {
+func createDefaultNoReroutePodPolicies(nbClient libovsdbclient.Client, network, controller, clusterRouter string, v4ClusterSubnet, v6ClusterSubnet []*net.IPNet) error {
 	for _, v4Subnet := range v4ClusterSubnet {
 		match := fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Subnet.String(), v4Subnet.String())
-		if err := createLogicalRouterPolicy(nbClient, clusterRouter, match, types.DefaultNoRereoutePriority, nil, nil); err != nil {
+		dbIDs := getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, network, controller)
+		if err := createLogicalRouterPolicy(nbClient, clusterRouter, match, types.DefaultNoRereoutePriority, nil, dbIDs); err != nil {
 			return fmt.Errorf("unable to create IPv4 no-reroute pod policies, err: %v", err)
 		}
 	}
 	for _, v6Subnet := range v6ClusterSubnet {
 		match := fmt.Sprintf("ip6.src == %s && ip6.dst == %s", v6Subnet.String(), v6Subnet.String())
-		if err := createLogicalRouterPolicy(nbClient, clusterRouter, match, types.DefaultNoRereoutePriority, nil, nil); err != nil {
+		dbIDs := getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV6, network, controller)
+		if err := createLogicalRouterPolicy(nbClient, clusterRouter, match, types.DefaultNoRereoutePriority, nil, dbIDs); err != nil {
 			return fmt.Errorf("unable to create IPv6 no-reroute pod policies, err: %v", err)
 		}
 	}
@@ -2097,13 +2980,12 @@ func createDefaultNoReroutePodPolicies(nbClient libovsdbclient.Client, clusterRo
 // This mark is then matched on the reroute policies to determine if its a reply packet
 // in which case we do not need to reRoute to other nodes and if its a service response it is
 // not rerouted since mark will be present.
-func createDefaultReRouteQoSRuleOps(nbClient libovsdbclient.Client, addressSetFactory addressset.AddressSetFactory,
-	controllerName string) ([]*nbdb.QoS, []ovsdb.Operation, error) {
+func createDefaultReRouteQoSRuleOps(nbClient libovsdbclient.Client, addressSetFactory addressset.AddressSetFactory, ops []libovsdb.Operation,
+	network, controller string, isIPv4Mode, isIPv6Mode bool) ([]*nbdb.QoS, []ovsdb.Operation, error) {
 	// fetch the egressIP pods address-set
-	dbIDs := getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, controllerName)
+	dbIDs := getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, network, controller)
 	var as addressset.AddressSet
 	var err error
-	var ops []ovsdb.Operation
 	qoses := []*nbdb.QoS{}
 	if as, err = addressSetFactory.GetAddressSet(dbIDs); err != nil {
 		return nil, nil, fmt.Errorf("cannot ensure that addressSet %s exists %v", EgressIPServedPodsAddrSetName, err)
@@ -2114,70 +2996,117 @@ func createDefaultReRouteQoSRuleOps(nbClient libovsdbclient.Client, addressSetFa
 		Action:    map[string]int{"mark": types.EgressIPReplyTrafficConnectionMark},
 		Direction: nbdb.QoSDirectionFromLport,
 	}
-	if config.IPv4Mode {
+	if isIPv4Mode {
+		if ipv4EgressIPServedPodsAS == "" {
+			return nil, nil, fmt.Errorf("failed to fetch IPv4 address set %s hash names", EgressIPServedPodsAddrSetName)
+		}
 		qosV4Rule := qosRule
 		qosV4Rule.Match = fmt.Sprintf(`ip4.src == $%s && ct.trk && ct.rpl`, ipv4EgressIPServedPodsAS)
-		qosV4Rule.ExternalIDs = getEgressIPQoSRuleDbIDs(IPFamilyValueV4).GetExternalIDs()
-		ops, err = libovsdbops.CreateOrUpdateQoSesOps(nbClient, nil, &qosV4Rule)
+		qosV4Rule.ExternalIDs = getEgressIPQoSRuleDbIDs(IPFamilyValueV4, network, controller).GetExternalIDs()
+		ops, err = libovsdbops.CreateOrUpdateQoSesOps(nbClient, ops, &qosV4Rule)
 		if err != nil {
-			return nil, nil, fmt.Errorf("cannot create v4 QoS rule ops for egressIP feature on controller %s, %v", controllerName, err)
+			return nil, nil, fmt.Errorf("cannot create v4 QoS rule ops for egressIP feature on controller %s, %v", controller, err)
 		}
 		qoses = append(qoses, &qosV4Rule)
 	}
-	if config.IPv6Mode {
+	if isIPv6Mode {
+		if ipv6EgressIPServedPodsAS == "" {
+			return nil, nil, fmt.Errorf("failed to fetch IPv6 address set %s hash names", EgressIPServedPodsAddrSetName)
+		}
 		qosV6Rule := qosRule
 		qosV6Rule.Match = fmt.Sprintf(`ip6.src == $%s && ct.trk && ct.rpl`, ipv6EgressIPServedPodsAS)
-		qosV6Rule.ExternalIDs = getEgressIPQoSRuleDbIDs(IPFamilyValueV6).GetExternalIDs()
+		qosV6Rule.ExternalIDs = getEgressIPQoSRuleDbIDs(IPFamilyValueV6, network, controller).GetExternalIDs()
 		ops, err = libovsdbops.CreateOrUpdateQoSesOps(nbClient, ops, &qosV6Rule)
 		if err != nil {
-			return nil, nil, fmt.Errorf("cannot create v6 QoS rule ops for egressIP feature on controller %s, %v", controllerName, err)
+			return nil, nil, fmt.Errorf("cannot create v6 QoS rule ops for egressIP feature on controller %s, %v", controller, err)
 		}
 		qoses = append(qoses, &qosV6Rule)
 	}
 	return qoses, ops, nil
 }
 
-func (oc *DefaultNetworkController) ensureDefaultNoRerouteQoSRules(nodeName string) error {
-	oc.eIPC.nodeUpdateMutex.Lock()
-	defer oc.eIPC.nodeUpdateMutex.Unlock()
-	var ops []ovsdb.Operation
+func (e *EgressIPController) ensureDefaultNoRerouteQoSRules(nodeName string) error {
+	e.nodeUpdateMutex.Lock()
+	defer e.nodeUpdateMutex.Unlock()
+	defaultNetInfo, err := e.nadController.GetNetwork(types.DefaultNetworkName)
+	if err != nil {
+		return fmt.Errorf("failed to get default network from NAD controller: %v", err)
+	}
+	var ops []libovsdb.Operation
+	ops, err = e.ensureDefaultNoReRouteQosRulesForNode(defaultNetInfo, nodeName, ops)
+	if err != nil {
+		return fmt.Errorf("failed to process default network: %v", err)
+	}
+	if err = e.nadController.DoWithLock(func(network util.NetInfo) error {
+		if network.GetNetworkName() == types.DefaultNetworkName {
+			return nil
+		}
+		ops, err = e.ensureDefaultNoReRouteQosRulesForNode(network, nodeName, ops)
+		if err != nil {
+			return fmt.Errorf("failed to process network %s: %v", network.GetNetworkName(), err)
+		}
+		return nil
+	}); err != nil {
+		return fmt.Errorf("failed to ensure default no reroute QoS rules: %v", err)
+	}
+	if _, err := libovsdbops.TransactAndCheck(e.nbClient, ops); err != nil {
+		return fmt.Errorf("unable to add EgressIP QoS to switch, err: %v", err)
+	}
+	return nil
+}
+
+func (e *EgressIPController) ensureDefaultNoReRouteQosRulesForNode(ni util.NetInfo, nodeName string, ops []libovsdb.Operation) ([]libovsdb.Operation, error) {
 	// since this function is called from node update event, let us check
 	// libovsdb cache before trying to create insert/update ops so that it
 	// doesn't cause no-op construction spams at scale (kubelet sends node
 	// update events every 10seconds so we don't want to cause unnecessary
 	// no-op transacts that often and lookup is less expensive)
-	predicateIDs := libovsdbops.NewDbObjectIDs(libovsdbops.LogicalRouterPolicyEgressIP, DefaultNetworkControllerName,
-		map[libovsdbops.ExternalIDKey]string{
-			libovsdbops.ObjectNameKey: string(ReplyTrafficMark),
-		})
-	qosPredicate := libovsdbops.GetPredicate[*nbdb.QoS](predicateIDs, nil)
-	existingQoSes, err := libovsdbops.FindQoSesWithPredicate(oc.nbClient, qosPredicate)
-	if err != nil {
-		return err
+	getQOSForFamily := func(ipFamily egressIPFamilyValue, existingQOSes []*nbdb.QoS) ([]*nbdb.QoS, error) {
+		dbIDs := getEgressIPQoSRuleDbIDs(ipFamily, ni.GetNetworkName(), e.controllerName)
+		p := libovsdbops.GetPredicate[*nbdb.QoS](dbIDs, nil)
+		existingQoSesForIPFamily, err := libovsdbops.FindQoSesWithPredicate(e.nbClient, p)
+		if err != nil {
+			return nil, fmt.Errorf("failed to find QOS with predicate: %v", err)
+		}
+		return append(existingQOSes, existingQoSesForIPFamily...), nil
+	}
+	existingQoSes := make([]*nbdb.QoS, 0, 2)
+	var err error
+	if e.v4 {
+		existingQoSes, err = getQOSForFamily(IPFamilyValueV4, existingQoSes)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get existing IPv4 QOS rules: %v", err)
+		}
+	}
+	if e.v6 {
+		existingQoSes, err = getQOSForFamily(IPFamilyValueV6, existingQoSes)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get existing IPv6 QOS rules: %v", err)
+		}
 	}
 	qosExists := false
-	if config.IPv4Mode && config.IPv6Mode && len(existingQoSes) == 2 {
+	if e.v4 && e.v6 && len(existingQoSes) == 2 {
 		// no need to create QoS Rule ops; already exists; dualstack
 		qosExists = true
 	}
-	if len(existingQoSes) == 1 && ((config.IPv4Mode && !config.IPv6Mode) || (config.IPv6Mode && !config.IPv4Mode)) {
+	if len(existingQoSes) == 1 && ((e.v4 && !e.v6) || (e.v6 && !e.v4)) {
 		// no need to create QoS Rule ops; already exists; single stack
 		qosExists = true
 	}
 	if !qosExists {
-		existingQoSes, ops, err = createDefaultReRouteQoSRuleOps(oc.nbClient, oc.addressSetFactory, oc.controllerName)
+		existingQoSes, ops, err = createDefaultReRouteQoSRuleOps(e.nbClient, e.addressSetFactory, ops, ni.GetNetworkName(), e.controllerName, e.v4, e.v6)
 		if err != nil {
-			return fmt.Errorf("cannot create QoS rule ops: %v", err)
+			return nil, fmt.Errorf("cannot create QoS rule ops: %v", err)
 		}
 	}
 	if len(existingQoSes) > 0 {
-		nodeSwitchName := oc.GetNetworkScopedSwitchName(nodeName)
+		nodeSwitchName := ni.GetNetworkScopedSwitchName(nodeName)
 		if qosExists {
 			// check if these rules were already added to the existing switch or not
 			addQoSToSwitch := false
-			nodeSwitch, err := libovsdbops.GetLogicalSwitch(oc.nbClient, &nbdb.LogicalSwitch{Name: nodeSwitchName})
+			nodeSwitch, err := libovsdbops.GetLogicalSwitch(e.nbClient, &nbdb.LogicalSwitch{Name: nodeSwitchName})
 			if err != nil {
-				return fmt.Errorf("cannot fetch switch for node %s: %v", nodeSwitchName, err)
+				return nil, fmt.Errorf("cannot fetch switch for node %s: %v", nodeSwitchName, err)
 			}
 			for _, qos := range existingQoSes {
 				if slices.Contains(nodeSwitch.QOSRules, qos.UUID) {
@@ -2188,25 +3117,48 @@ func (oc *DefaultNetworkController) ensureDefaultNoRerouteQoSRules(nodeName stri
 				}
 			}
 			if !addQoSToSwitch {
-				return nil
+				return ops, nil
 			}
 		}
-		ops, err = libovsdbops.AddQoSesToLogicalSwitchOps(oc.nbClient, ops, nodeSwitchName, existingQoSes...)
+		ops, err = libovsdbops.AddQoSesToLogicalSwitchOps(e.nbClient, ops, nodeSwitchName, existingQoSes...)
 		if err != nil {
-			return err
+			return nil, fmt.Errorf("failed to add QoS to logical switch %s: %v", nodeSwitchName, err)
 		}
 	}
-	if _, err := libovsdbops.TransactAndCheck(oc.nbClient, ops); err != nil {
-		return fmt.Errorf("unable to add EgressIP QoS to switch on zone %s, err: %v", oc.zone, err)
-	}
-	return nil
+	return ops, nil
 }
 
-func (oc *DefaultNetworkController) ensureDefaultNoRerouteNodePolicies() error {
-	oc.eIPC.nodeUpdateMutex.Lock()
-	defer oc.eIPC.nodeUpdateMutex.Unlock()
-	nodeLister := listers.NewNodeLister(oc.watchFactory.NodeInformer().GetIndexer())
-	return ensureDefaultNoRerouteNodePolicies(oc.nbClient, oc.addressSetFactory, oc.controllerName, oc.GetNetworkScopedClusterRouterName(), nodeLister)
+func (e *EgressIPController) ensureDefaultNoRerouteNodePolicies() error {
+	e.nodeUpdateMutex.Lock()
+	defer e.nodeUpdateMutex.Unlock()
+	nodeLister := listers.NewNodeLister(e.watchFactory.NodeInformer().GetIndexer())
+	// ensure default network is processed
+	defaultNetInfo, err := e.nadController.GetNetwork(types.DefaultNetworkName)
+	if err != nil {
+		return fmt.Errorf("failed to get default network: %v", err)
+	}
+	err = ensureDefaultNoRerouteNodePolicies(e.nbClient, e.addressSetFactory, defaultNetInfo.GetNetworkName(), defaultNetInfo.GetNetworkScopedClusterRouterName(),
+		e.controllerName, nodeLister, e.v4, e.v6)
+	if err != nil {
+		return fmt.Errorf("failed to ensure default no reroute policies for nodes for default network: %v", err)
+	}
+	if !util.IsNetworkSegmentationSupportEnabled() {
+		return nil
+	}
+	if err = e.nadController.DoWithLock(func(network util.NetInfo) error {
+		if network.GetNetworkName() == types.DefaultNetworkName {
+			return nil
+		}
+		err = ensureDefaultNoRerouteNodePolicies(e.nbClient, e.addressSetFactory, network.GetNetworkName(), network.GetNetworkScopedClusterRouterName(),
+			e.controllerName, nodeLister, e.v4, e.v6)
+		if err != nil {
+			return fmt.Errorf("failed to ensure default no reroute policies for nodes for network %s: %v", network.GetNetworkName(), err)
		}
+		return nil
+	}); err != nil {
+		return err
	}
+	return nil
 }
 
 // ensureDefaultNoRerouteNodePolicies ensures egress pods east<->west traffic with hostNetwork pods,
@@ -2216,22 +3168,23 @@ func (oc *DefaultNetworkController) ensureDefaultNoRerouteNodePolicies() error {
 // All the cluster node's addresses are considered. This is to avoid race conditions after a VIP moves from one node
 // to another where we might process events out of order. For the same reason this function needs to be called under
 // lock.
-func ensureDefaultNoRerouteNodePolicies(nbClient libovsdbclient.Client, addressSetFactory addressset.AddressSetFactory, controllerName, clusterRouter string, nodeLister listers.NodeLister) error {
+func ensureDefaultNoRerouteNodePolicies(nbClient libovsdbclient.Client, addressSetFactory addressset.AddressSetFactory,
+	network, router, controller string, nodeLister listers.NodeLister, v4, v6 bool) error {
 	nodes, err := nodeLister.List(labels.Everything())
 	if err != nil {
-		return err
+		return fmt.Errorf("failed to list nodes: %v", err)
 	}
-
-	v4NodeAddrs, v6NodeAddrs, err := util.GetNodeAddresses(config.IPv4Mode, config.IPv6Mode, nodes...)
+	v4NodeAddrs, v6NodeAddrs, err := util.GetNodeAddresses(v4, v6, nodes...)
 	if err != nil {
-		return err
+		return fmt.Errorf("failed to get node addresses: %v", err)
 	}
 
 	allAddresses := make([]net.IP, 0, len(v4NodeAddrs)+len(v6NodeAddrs))
 	allAddresses = append(allAddresses, v4NodeAddrs...)
 	allAddresses = append(allAddresses, v6NodeAddrs...)
 
 	var as addressset.AddressSet
-	dbIDs := getEgressIPAddrSetDbIDs(NodeIPAddrSetName, controllerName)
+	// all networks reference the same node IP address set
+	dbIDs := getEgressIPAddrSetDbIDs(NodeIPAddrSetName, types.DefaultNetworkName, DefaultNetworkControllerName)
 	if as, err = addressSetFactory.GetAddressSet(dbIDs); err != nil {
 		return fmt.Errorf("cannot ensure that addressSet %s exists %v", NodeIPAddrSetName, err)
 	}
@@ -2242,14 +3195,14 @@ func ensureDefaultNoRerouteNodePolicies(nbClient libovsdbclient.Client, addressS
 	ipv4ClusterNodeIPAS, ipv6ClusterNodeIPAS := as.GetASHashNames()
 	// fetch the egressIP pods address-set
-	dbIDs = getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, controllerName)
+	dbIDs = getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, network, controller)
 	if as, err = addressSetFactory.GetAddressSet(dbIDs); err != nil {
 		return fmt.Errorf("cannot ensure that addressSet %s exists %v", EgressIPServedPodsAddrSetName, err)
 	}
 	ipv4EgressIPServedPodsAS, ipv6EgressIPServedPodsAS := as.GetASHashNames()
 
 	// fetch the egressService pods address-set
-	dbIDs = egresssvc.GetEgressServiceAddrSetDbIDs(controllerName)
+	dbIDs = egresssvc.GetEgressServiceAddrSetDbIDs(controller)
 	if as, err = addressSetFactory.GetAddressSet(dbIDs); err != nil {
 		return fmt.Errorf("cannot ensure that addressSet %s exists %v", egresssvc.EgressServiceServedPodsAddrSetName, err)
 	}
@@ -2258,40 +3211,99 @@ func ensureDefaultNoRerouteNodePolicies(nbClient libovsdbclient.Client, addressS
 	var matchV4, matchV6 string
 	// construct the policy match
 	if len(v4NodeAddrs) > 0 {
+		if ipv4EgressIPServedPodsAS == "" || ipv4EgressServiceServedPodsAS == "" || ipv4ClusterNodeIPAS == "" {
+			return fmt.Errorf("address set hash name(s) %s not found %q %q %q", as.GetName(), ipv4EgressIPServedPodsAS, ipv4EgressServiceServedPodsAS, ipv4ClusterNodeIPAS)
+		}
 		matchV4 = fmt.Sprintf(`(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s`,
 			ipv4EgressIPServedPodsAS, ipv4EgressServiceServedPodsAS, ipv4ClusterNodeIPAS)
 	}
 	if len(v6NodeAddrs) > 0 {
+		if ipv6EgressIPServedPodsAS == "" || ipv6EgressServiceServedPodsAS == "" || ipv6ClusterNodeIPAS == "" {
+			return fmt.Errorf("address set hash name(s) %s not found", as.GetName())
+		}
 		matchV6 = fmt.Sprintf(`(ip6.src == $%s || ip6.src == $%s) && ip6.dst == $%s`,
 			ipv6EgressIPServedPodsAS, ipv6EgressServiceServedPodsAS, ipv6ClusterNodeIPAS)
 	}
 
 	options := map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}
 	// Create global allow policy for node traffic
 	if matchV4 != "" {
-		if err := createLogicalRouterPolicy(nbClient, clusterRouter, matchV4, types.DefaultNoRereoutePriority, nil, options); err != nil {
+		dbIDs = getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, network, controller)
+		if err := createLogicalRouterPolicy(nbClient, router, matchV4, types.DefaultNoRereoutePriority, options, dbIDs); err != nil {
 			return fmt.Errorf("unable to create IPv4 no-reroute node policies, err: %v", err)
 		}
 	}
 
 	if matchV6 != "" {
-		if err := createLogicalRouterPolicy(nbClient, clusterRouter, matchV6, types.DefaultNoRereoutePriority, nil, options); err != nil {
+		dbIDs = getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV6, network, controller)
+		if err := createLogicalRouterPolicy(nbClient, router, matchV6, types.DefaultNoRereoutePriority, options, dbIDs); err != nil {
 			return fmt.Errorf("unable to create IPv6 no-reroute node policies, err: %v", err)
 		}
 	}
 	return nil
 }
 
-func createLogicalRouterPolicy(nbClient libovsdbclient.Client, clusterRouter, match string, priority int, externalIDs, options map[string]string) error {
+func (e *EgressIPController) getPodIPs(ni util.NetInfo, pod *corev1.Pod, nadName string) ([]*net.IPNet, error) {
+	podIPs := make([]*net.IPNet, 0)
+	getIPFromIPNetFn := func(podIPNets []*net.IPNet) []*net.IPNet {
+		ipNetsCopy := make([]*net.IPNet, 0, len(podIPNets))
+		for _, ipNet := range podIPNets {
+			ipNetCopy := *ipNet
+			ipNetsCopy = append(ipNetsCopy, &ipNetCopy)
+		}
+		return ipNetsCopy
+	}
+	if e.isPodScheduledinLocalZone(pod) {
+		// Retrieve the pod's networking configuration from the
+		// logicalPortCache. The reason for doing this: a) only normal network
+		// pods are placed in this cache, b) once the pod is placed here we know
+		// addLogicalPort has finished successfully setting up networking for
+		// the pod, so we can proceed with retrieving its IP and deleting the
+		// external GW configuration created in addLogicalPort for the pod.
+		logicalPort, err := e.logicalPortCache.get(pod, nadName)
+		if err != nil {
+			return nil, nil
+		}
+		// Since the logical switch port cache removes entries only 60 seconds
+		// after deletion, it's possible that when a pod is recreated with the same name
+		// within that 60 second window, stale info gets used to create SNATs and reroutes
+		// for the eip pods. Checking if the expiry is set for the port or not can indicate
+		// if the port is scheduled for deletion.
+		if !logicalPort.expires.IsZero() {
+			klog.Warningf("Stale LSP %s for pod %s/%s found in cache, refetching",
+				logicalPort.name, pod.Namespace, pod.Name)
+			return nil, nil
+		}
+		podIPs = getIPFromIPNetFn(logicalPort.ips)
+	} else { // means this is egress node's local master
+		if ni.IsDefault() {
+			podIPNets, err := util.GetPodCIDRsWithFullMask(pod, ni)
+			if err != nil {
+				return nil, fmt.Errorf("failed to get pod %s/%s IP: %v", pod.Namespace, pod.Name, err)
+			}
+			if len(podIPNets) == 0 {
+				return nil, fmt.Errorf("failed to get pod %s/%s IPs", pod.Namespace, pod.Name)
+			}
+			podIPs = getIPFromIPNetFn(podIPNets)
+		} else if ni.IsSecondary() {
+			podIPNets := util.GetPodCIDRsWithFullMaskOfNetwork(pod, nadName)
+			if len(podIPNets) == 0 {
+				return nil, fmt.Errorf("failed to get pod %s/%s IPs", pod.Namespace, pod.Name)
+			}
+			podIPs = getIPFromIPNetFn(podIPNets)
+		}
+	}
+	return podIPs, nil
+}
+
+func createLogicalRouterPolicy(nbClient libovsdbclient.Client, clusterRouter, match string, priority int, options map[string]string, dbIDs *libovsdbops.DbObjectIDs) error {
 	lrp := nbdb.LogicalRouterPolicy{
 		Priority:    priority,
 		Action:      nbdb.LogicalRouterPolicyActionAllow,
 		Match:       match,
-		ExternalIDs: externalIDs,
+		ExternalIDs: dbIDs.GetExternalIDs(),
 		Options:     options,
 	}
-	p := func(item *nbdb.LogicalRouterPolicy) bool {
-		return item.Match == lrp.Match && item.Priority == lrp.Priority
-	}
+	p := libovsdbops.GetPredicate[*nbdb.LogicalRouterPolicy](dbIDs, nil)
 	err := libovsdbops.CreateOrUpdateLogicalRouterPolicyWithPredicate(nbClient, clusterRouter, &lrp, p)
 	if err != nil {
 		return fmt.Errorf("error creating logical router policy %+v on router %s: %v", lrp, clusterRouter, err)
 	}
@@ -2316,25 +3328,30 @@ func DeleteLegacyDefaultNoRerouteNodePolicies(nbClient libovsdbclient.Client, cl
 	return libovsdbops.DeleteLogicalRouterPoliciesWithPredicate(nbClient, clusterRouter, p)
 }
 
-func (e *egressIPZoneController) buildSNATFromEgressIPStatus(podIP net.IP, status egressipv1.EgressIPStatusItem, egressIPName string) (*nbdb.NAT, error) {
+func (e *EgressIPController) buildSNATFromEgressIPStatus(ni util.NetInfo, podIP net.IP, status egressipv1.EgressIPStatusItem, egressIPName, podNamespace, podName string) (*nbdb.NAT, error) {
 	logicalIP := &net.IPNet{
 		IP:   podIP,
 		Mask: util.GetIPFullMask(podIP),
 	}
+	ipFamily := IPFamilyValueV4
+	if utilnet.IsIPv6CIDR(logicalIP) {
+		ipFamily = IPFamilyValueV6
+	}
 	externalIP := net.ParseIP(status.EgressIP)
-	logicalPort := e.GetNetworkScopedK8sMgmtIntfName(status.Node)
-	externalIds := map[string]string{"name": egressIPName}
+	logicalPort := ni.GetNetworkScopedK8sMgmtIntfName(status.Node)
+	externalIds := getEgressIPNATDbIDs(egressIPName, podNamespace, podName, ipFamily, e.controllerName).GetExternalIDs()
 	nat := libovsdbops.BuildSNAT(&externalIP, logicalIP, logicalPort, externalIds)
 	return nat, nil
 }
 
-func (e *egressIPZoneController) createNATRuleOps(ops []ovsdb.Operation, podIPs []*net.IPNet, status egressipv1.EgressIPStatusItem, egressIPName string) ([]ovsdb.Operation, error) {
+func (e *EgressIPController) createNATRuleOps(ni util.NetInfo, ops []ovsdb.Operation, podIPs []*net.IPNet, status egressipv1.EgressIPStatusItem,
+	egressIPName, podNamespace, podName string) ([]ovsdb.Operation, error) {
 	nats := make([]*nbdb.NAT, 0, len(podIPs))
 	var nat *nbdb.NAT
 	var err error
 	for _, podIP := range podIPs {
 		if (utilnet.IsIPv6String(status.EgressIP) && utilnet.IsIPv6(podIP.IP)) || (!utilnet.IsIPv6String(status.EgressIP) && !utilnet.IsIPv6(podIP.IP)) {
-			nat, err = e.buildSNATFromEgressIPStatus(podIP.IP, status, egressIPName)
+			nat, err = e.buildSNATFromEgressIPStatus(ni, podIP.IP, status, egressIPName, podNamespace, podName)
 			if err != nil {
 				return nil, err
 			}
@@ -2342,7 +3359,7 @@ func (e *egressIPZoneController) createNATRuleOps(ops []ovsdb.Operation, podIPs
 		}
 	}
 	router := &nbdb.LogicalRouter{
-		Name: e.GetNetworkScopedGWRouterName(status.Node),
+		Name: ni.GetNetworkScopedGWRouterName(status.Node),
 	}
 	ops, err = libovsdbops.CreateOrUpdateNATsOps(e.nbClient, ops, router, nats...)
 	if err != nil {
@@ -2351,29 +3368,112 @@ func (e *egressIPZoneController) createNATRuleOps(ops []ovsdb.Operation, podIPs
 	return ops, nil
 }
 
-func (e *egressIPZoneController) deleteNATRuleOps(ops []ovsdb.Operation, podIPs []*net.IPNet, status egressipv1.EgressIPStatusItem, egressIPName string) ([]ovsdb.Operation, error) {
-	nats := make([]*nbdb.NAT, 0, len(podIPs))
-	var nat *nbdb.NAT
+func (e *EgressIPController) deleteNATRuleOps(ni util.NetInfo, ops []ovsdb.Operation, status egressipv1.EgressIPStatusItem,
+	egressIPName, podNamespace, podName string) ([]ovsdb.Operation, error) {
 	var err error
-	for _, podIP := range podIPs {
-		if (utilnet.IsIPv6String(status.EgressIP) && utilnet.IsIPv6(podIP.IP)) || (!utilnet.IsIPv6String(status.EgressIP) && !utilnet.IsIPv6(podIP.IP)) {
-			nat, err = e.buildSNATFromEgressIPStatus(podIP.IP, status, egressIPName)
-			if err != nil {
-				return nil, err
-			}
-			nats = append(nats, nat)
-		}
-	}
+	pV4 := libovsdbops.GetPredicate[*nbdb.NAT](getEgressIPNATDbIDs(egressIPName, podNamespace, podName, IPFamilyValueV4, e.controllerName), nil)
+	pV6 := libovsdbops.GetPredicate[*nbdb.NAT](getEgressIPNATDbIDs(egressIPName, podNamespace, podName, IPFamilyValueV6, e.controllerName), nil)
 	router := &nbdb.LogicalRouter{
-		Name: e.GetNetworkScopedGWRouterName(status.Node),
+		Name: ni.GetNetworkScopedGWRouterName(status.Node),
 	}
-	ops, err = libovsdbops.DeleteNATsOps(e.nbClient, ops, router, nats...)
+	ops, err = libovsdbops.DeleteNATsWithPredicateOps(e.nbClient, ops, pV4)
 	if err != nil {
-		return nil, fmt.Errorf("unable to remove snat rules for router: %s, error: %v", router.Name, err)
+		return nil, fmt.Errorf("unable to remove SNAT IPv4 rules for router: %s, error: %v", router.Name, err)
+	}
+	ops, err = libovsdbops.DeleteNATsWithPredicateOps(e.nbClient, ops, pV6)
+	if err != nil {
+		return nil, fmt.Errorf("unable to remove SNAT IPv6 rules for router: %s, error: %v", router.Name, err)
 	}
 	return ops, nil
 }
 
+// getNetworkFromPodAssignmentWithLock attempts to find a pod's network from the pod assignment cache. If the pod is not found
+// in the cache or no network is set, a nil network is returned.
+func (e *EgressIPController) getNetworkFromPodAssignmentWithLock(podKey string) util.NetInfo {
+	e.podAssignmentMutex.Lock()
+	defer e.podAssignmentMutex.Unlock()
+	return e.getNetworkFromPodAssignment(podKey)
+}
+
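The reworked `deleteNATRuleOps` above no longer rebuilds the exact NAT rows to delete; it selects them with one external-ID predicate per IP family. A hand-rolled sketch of that predicate-selection style (the map keys are illustrative, not the real OVN schema):

```go
package main

import "fmt"

type natRow struct{ externalIDs map[string]string }

func main() {
	rows := []natRow{
		{map[string]string{"owner-type": "EgressIP", "name": "eip1/ns1/pod1", "family": "ip4"}},
		{map[string]string{"owner-type": "EgressIP", "name": "eip2/ns1/pod2", "family": "ip4"}},
	}
	// keep-or-delete driven purely by external IDs, in the spirit of
	// libovsdbops.DeleteNATsWithPredicateOps in the hunk above
	pred := func(r natRow) bool {
		return r.externalIDs["owner-type"] == "EgressIP" && r.externalIDs["name"] == "eip1/ns1/pod1"
	}
	for _, r := range rows {
		if pred(r) {
			fmt.Println("would delete NAT:", r.externalIDs["name"])
		}
	}
}
```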
+func (e *EgressIPController) getNetworkFromPodAssignment(podKey string) util.NetInfo { + podAssignment, ok := e.podAssignment[podKey] + if !ok { + return nil + } + return podAssignment.network +} + +func ensureDefaultNoRerouteUDNEnabledSvcPolicies(nbClient libovsdbclient.Client, addressSetFactory addressset.AddressSetFactory, + ni util.NetInfo, controllerName string, v4, v6 bool) error { + var err error + var as addressset.AddressSet + // fetch the egressIP pods address-set + dbIDs := getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, ni.GetNetworkName(), controllerName) + if as, err = addressSetFactory.GetAddressSet(dbIDs); err != nil { + return fmt.Errorf("cannot ensure that addressSet %s exists: %v", EgressIPServedPodsAddrSetName, err) + } + ipv4EgressIPServedPodsAS, ipv6EgressIPServedPodsAS := as.GetASHashNames() + + // fetch the egressService pods address-set + dbIDs = egresssvc.GetEgressServiceAddrSetDbIDs(controllerName) + if as, err = addressSetFactory.GetAddressSet(dbIDs); err != nil { + return fmt.Errorf("cannot ensure that addressSet %s exists: %v", egresssvc.EgressServiceServedPodsAddrSetName, err) + } + ipv4EgressServiceServedPodsAS, ipv6EgressServiceServedPodsAS := as.GetASHashNames() + + dbIDs = udnenabledsvc.GetAddressSetDBIDs() + var ipv4UDNEnabledSvcAS, ipv6UDNEnabledSvcAS string + // the address set may not be created immediately + err = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (done bool, err error) { + + as, err := addressSetFactory.GetAddressSet(dbIDs) + if err != nil { + klog.V(5).Infof("Failed to get UDN enabled service address set, retrying: %v", err) + return false, nil + } + ipv4UDNEnabledSvcAS, ipv6UDNEnabledSvcAS = as.GetASHashNames() + if ipv4UDNEnabledSvcAS == "" && ipv6UDNEnabledSvcAS == "" { // only one IP family is required + return false, nil + } + return true, nil + }) + if err != nil { + return fmt.Errorf("failed to retrieve UDN enabled service address set from NB DB: %v", err) + } + + var matchV4, matchV6 string + // construct the policy match + if v4 && ipv4UDNEnabledSvcAS != "" { + matchV4 = fmt.Sprintf(`(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s`, + ipv4EgressIPServedPodsAS, ipv4EgressServiceServedPodsAS, ipv4UDNEnabledSvcAS) + } + if v6 && ipv6UDNEnabledSvcAS != "" { + if ipv6EgressIPServedPodsAS == "" || ipv6EgressServiceServedPodsAS == "" || ipv6UDNEnabledSvcAS == "" { + return fmt.Errorf("address set hash name(s) %s not found", as.GetName()) + } + matchV6 = fmt.Sprintf(`(ip6.src == $%s || ip6.src == $%s) && ip6.dst == $%s`, + ipv6EgressIPServedPodsAS, ipv6EgressServiceServedPodsAS, ipv6UDNEnabledSvcAS) + } + + // Create global allow policy for UDN enabled service traffic + if v4 && matchV4 != "" { + dbIDs = getEgressIPLRPNoReRouteDbIDs(types.DefaultNoRereoutePriority, NoReRouteUDNPodToCDNSvc, IPFamilyValueV4, ni.GetNetworkName(), controllerName) + if err := createLogicalRouterPolicy(nbClient, ni.GetNetworkScopedClusterRouterName(), matchV4, types.DefaultNoRereoutePriority, nil, dbIDs); err != nil { + return fmt.Errorf("unable to create IPv4 no-reroute UDN pod to CDN svc policies, err: %v", err) + } + } + + if v6 && matchV6 != "" { + dbIDs = getEgressIPLRPNoReRouteDbIDs(types.DefaultNoRereoutePriority, NoReRouteUDNPodToCDNSvc, IPFamilyValueV6, ni.GetNetworkName(), controllerName) + if err := createLogicalRouterPolicy(nbClient, ni.GetNetworkScopedClusterRouterName(), matchV6, types.DefaultNoRereoutePriority, nil, dbIDs); err != nil { + return fmt.Errorf("unable to create 
IPv6 no-reroute UDN pod to CDN svc policies, err: %v", err) + } + } + return nil +} + func getPodKey(pod *kapi.Pod) string { return fmt.Sprintf("%s_%s", pod.Namespace, pod.Name) } @@ -2382,3 +3482,38 @@ func getPodNamespaceAndNameFromKey(podKey string) (string, string) { parts := strings.Split(podKey, "_") return parts[0], parts[1] } + +func getEgressIPPktMark(eipName string, annotations map[string]string) util.EgressIPMark { + var err error + var mark util.EgressIPMark + if util.IsNetworkSegmentationSupportEnabled() && util.IsEgressIPMarkSet(annotations) { + mark, err = util.ParseEgressIPMark(annotations) + if err != nil { + klog.Errorf("Failed to get EgressIP %s packet mark from annotations: %v", eipName, err) + } + } + return mark +} + +func getPodIPFromEIPSNATMarkMatch(match string) string { + //format ${IP family}.src == ${pod IP} + if match == "" { + return "" + } + matchSplit := strings.Split(match, " ") + if len(matchSplit) != 3 { + return "" + } + return matchSplit[2] +} + +func getEIPIPFamily(isIPv6 bool) egressIPFamilyValue { + if isIPv6 { + return IPFamilyValueV6 + } + return IPFamilyValueV4 +} + +func addPktMarkToLRPOptions(options map[string]string, mark string) { + options["pkt_mark"] = mark +} diff --git a/go-controller/pkg/ovn/egressip_test.go b/go-controller/pkg/ovn/egressip_test.go index 4edbbdca66..c42e858489 100644 --- a/go-controller/pkg/ovn/egressip_test.go +++ b/go-controller/pkg/ovn/egressip_test.go @@ -7,10 +7,6 @@ import ( "strings" "time" - "github.com/onsi/ginkgo" - ginkgotable "github.com/onsi/ginkgo/extensions/table" - "github.com/onsi/gomega" - libovsdbclient "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" egressipv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" @@ -22,11 +18,15 @@ import ( libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + ginkgotable "github.com/onsi/ginkgo/extensions/table" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/urfave/cli/v2" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8stypes "k8s.io/apimachinery/pkg/types" + utilnet "k8s.io/utils/net" utilpointer "k8s.io/utils/pointer" ) @@ -65,14 +65,6 @@ const ( inspectTimeout = 4 * time.Second // arbitrary, to avoid failures on github CI ) -var eipExternalID = map[string]string{ - "name": egressIPName, -} - -var eip2ExternalID = map[string]string{ - "name": egressIP2Name, -} - func newEgressIPMeta(name string) metav1.ObjectMeta { return metav1.ObjectMeta{ UID: k8stypes.UID(name), @@ -80,6 +72,7 @@ func newEgressIPMeta(name string) metav1.ObjectMeta { Labels: map[string]string{ "name": name, }, + Annotations: map[string]string{util.EgressIPMarkAnnotation: "50001"}, } } @@ -91,7 +84,7 @@ type nodeInfo struct { var egressPodLabel = map[string]string{"egress": "needed"} -var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { +var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network", func() { var ( app *cli.App fakeOvn *FakeOVN @@ -142,14 +135,14 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { return reAssignmentCount } - getIPv4Nodes := func(nodeInfos []nodeInfo) []v1.Node { + getIPv4Nodes := func(nodeInfos []nodeInfo) []corev1.Node { // first address in 
each nodeAddress address is assumed to be OVN network nodeSuffix := 1 nodeSubnets := []string{v4Node1Subnet, v4Node2Subnet, v4Node3Subnet} if len(nodeInfos) > len(nodeSubnets) { panic("not enough node subnets for the amount of nodes that needs to be created") } - nodes := make([]v1.Node, 0) + nodes := make([]corev1.Node, 0) var hostCIDRs string for i, ni := range nodeInfos { hostCIDRs = "[" @@ -174,14 +167,14 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { return nodes } - getIPv6Nodes := func(nodeInfos []nodeInfo) []v1.Node { + getIPv6Nodes := func(nodeInfos []nodeInfo) []corev1.Node { // first address in each nodeAddress address is assumed to be OVN network nodeSuffix := 1 nodeSubnets := []string{v6Node1Subnet, v6Node2Subnet} if len(nodeInfos) > len(nodeSubnets) { panic("not enough node subnets for the amount of nodes that needs to be created") } - nodes := make([]v1.Node, 0) + nodes := make([]corev1.Node, 0) var hostCIDRs string for i, ni := range nodeInfos { hostCIDRs = "[" @@ -231,7 +224,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { fakeOvn.shutdown() }) - getPodAssignmentState := func(pod *v1.Pod) *podAssignmentState { + getPodAssignmentState := func(pod *corev1.Pod) *podAssignmentState { fakeOvn.controller.eIPC.podAssignmentMutex.Lock() defer fakeOvn.controller.eIPC.podAssignmentMutex.Unlock() if pas := fakeOvn.controller.eIPC.podAssignment[getPodKey(pod)]; pas != nil { @@ -323,11 +316,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1}, + &corev1.NodeList{ + Items: []corev1.Node{node1}, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }) err := fakeOvn.controller.WatchEgressIPNamespaces() @@ -344,55 +337,55 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { fakeOvn.controller.logicalPortCache.add(&egressPod, "", types.DefaultNetworkName, "", nil, []*net.IPNet{n}) _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(egressPod.Namespace).Create(context.TODO(), &egressPod, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID"} expectedNatLogicalPort := "k8s-node1" - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) ipNets, _ := util.ParseIPNets(node1IPv4Addresses) egressNodeIPs := []string{} for _, ipNet := range ipNets { egressNodeIPs = append(egressNodeIPs, ipNet.IP.String()) } egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets(egressNodeIPs) + node1Switch.QOSRules = []string{"default-QoS-UUID"} expectedDatabaseState := []libovsdbtest.TestData{ &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: 
getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.EgressIPReroutePriority, - Match: fmt.Sprintf("ip4.src == %s", egressPod.Status.PodIP), - Action: nbdb.LogicalRouterPolicyActionReroute, - Nexthops: nodeLogicalRouterIPv4, - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, - UUID: "reroute-UUID", + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: nodeLogicalRouterIPv4, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID", }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "(ip4.src == $a4548040316634674295 || ip4.src == $a13607449821398607916) && ip4.dst == $a14918748166599097711", - Action: nbdb.LogicalRouterPolicyActionAllow, - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, - UUID: "no-reroute-node-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "(ip4.src == $a8519615025667110816 || ip4.src == $a13607449821398607916) && ip4.dst == $a712973235162149816", + Action: nbdb.LogicalRouterPolicyActionAllow, + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + UUID: "no-reroute-node-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.NAT{ - UUID: "egressip-nat-UUID", - LogicalIP: podV4IP, - ExternalIP: egressIP, - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID", + LogicalIP: podV4IP, + ExternalIP: egressIP, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort, Options: map[string]string{ @@ -408,7 +401,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", - Policies: []string{"reroute-UUID", "default-no-reroute-UUID", "no-reroute-service-UUID", "no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + Policies: []string{"reroute-UUID", "default-no-reroute-UUID", "no-reroute-service-UUID", "no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouterPort{ UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID", @@ -431,7 +424,7 @@ var _ = ginkgo.Describe("OVN 
master EgressIP Operations", func() { Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, }, node1Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -528,11 +521,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1}, + &corev1.NodeList{ + Items: []corev1.Node{node1}, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }) err := fakeOvn.controller.WatchEgressIPNamespaces() @@ -552,9 +545,9 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { node1MgntIP, err := getSwitchManagementPortIP(&node1) gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}) + node1Switch.QOSRules = []string{"default-QoS-UUID"} + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) ipNets, _ := util.ParseIPNets(node1IPv4Addresses) egressNodeIPs := []string{} @@ -564,34 +557,35 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets(egressNodeIPs) expectedDatabaseState := []libovsdbtest.TestData{ &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.EgressIPReroutePriority, - Match: fmt.Sprintf("ip4.src == %s", egressPod.Status.PodIP), - Action: nbdb.LogicalRouterPolicyActionReroute, - Nexthops: []string{node1MgntIP.To4().String()}, - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, - UUID: "reroute-UUID", + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: 
[]string{node1MgntIP.To4().String()}, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID", }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "(ip4.src == $a4548040316634674295 || ip4.src == $a13607449821398607916) && ip4.dst == $a14918748166599097711", - Action: nbdb.LogicalRouterPolicyActionAllow, - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, - UUID: "no-reroute-node-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "(ip4.src == $a8519615025667110816 || ip4.src == $a13607449821398607916) && ip4.dst == $a712973235162149816", + Action: nbdb.LogicalRouterPolicyActionAllow, + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + UUID: "no-reroute-node-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -601,7 +595,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", - Policies: []string{"reroute-UUID", "default-no-reroute-UUID", "no-reroute-service-UUID", "no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + Policies: []string{"reroute-UUID", "default-no-reroute-UUID", "no-reroute-service-UUID", "no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouterPort{ UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID", @@ -629,7 +623,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, }, node1Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -761,11 +755,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node2, node1}, + &corev1.NodeList{ + Items: []corev1.Node{node2, node1}, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }) err := fakeOvn.controller.WatchEgressIPNamespaces() @@ -804,11 +798,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { fakeOvn.controller.logicalPortCache.add(&egressPod, "", types.DefaultNetworkName, "", nil, []*net.IPNet{n}) _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(egressPod.Namespace).Create(context.TODO(), &egressPod, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} - node2Switch.QOSRules = []string{"egressip-QoS-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID"} expectedNatLogicalPort := "k8s-node2" - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, types.DefaultNetworkName, 
fakeOvn.controller.eIPC.controllerName) ipNets, _ := util.ParseIPNets(append(node1IPv4Addresses, node2IPv4Addresses...)) egressNodeIPs := []string{} @@ -818,42 +812,41 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets(egressNodeIPs) expectedDatabaseState := []libovsdbtest.TestData{ &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.EgressIPReroutePriority, - Match: fmt.Sprintf("ip4.src == %s", egressPod.Status.PodIP), - Action: nbdb.LogicalRouterPolicyActionReroute, - Nexthops: node2LogicalRouterIPv4, - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, - UUID: "reroute-UUID", + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: node2LogicalRouterIPv4, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID", }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "(ip4.src == $a4548040316634674295 || ip4.src == $a13607449821398607916) && ip4.dst == $a14918748166599097711", - Action: nbdb.LogicalRouterPolicyActionAllow, - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, - UUID: "no-reroute-node-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "(ip4.src == $a8519615025667110816 || ip4.src == $a13607449821398607916) && ip4.dst == $a712973235162149816", + Action: nbdb.LogicalRouterPolicyActionAllow, + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + UUID: "no-reroute-node-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.NAT{ - UUID: "egressip-nat-UUID", - LogicalIP: podV4IP, - ExternalIP: egressIP, - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID", + LogicalIP: podV4IP, + ExternalIP: egressIP, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, 
fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort, Options: map[string]string{ @@ -874,7 +867,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", - Policies: []string{"reroute-UUID", "default-no-reroute-UUID", "no-reroute-service-UUID", "no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + Policies: []string{"reroute-UUID", "default-no-reroute-UUID", "no-reroute-service-UUID", "no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouterPort{ UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node2.Name + "-UUID", @@ -918,7 +911,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, node1Switch, node2Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -1089,11 +1082,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1, node2, node3}, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2, node3}, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }) err := fakeOvn.controller.WatchEgressIPNamespaces() @@ -1167,11 +1160,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(egressPod.Namespace).Create(context.TODO(), &egressPod, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) expectedNatLogicalPort := "k8s-node2" - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} - node2Switch.QOSRules = []string{"egressip-QoS-UUID"} - node3Switch.QOSRules = []string{"egressip-QoS-UUID"} - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}) + node1Switch.QOSRules = []string{"default-QoS-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID"} + node3Switch.QOSRules = []string{"default-QoS-UUID"} + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) ipNets, _ := util.ParseIPNets(append(node2IPv4Addresses, node3IPv4Addresses...)) egressNodeIPs := []string{} for _, ipNet := range ipNets { @@ -1180,41 +1173,40 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets(egressNodeIPs) expectedDatabaseState := []libovsdbtest.TestData{ &nbdb.LogicalRouterPolicy{ - Priority: types.EgressIPReroutePriority, - Match: fmt.Sprintf("ip4.src == %s", egressPod.Status.PodIP), - Action: nbdb.LogicalRouterPolicyActionReroute, - Nexthops: node2LogicalRouterIPv4, - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, - UUID: "reroute-UUID", + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: node2LogicalRouterIPv4, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, types.DefaultNetworkName, 
fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID", }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "(ip4.src == $a4548040316634674295 || ip4.src == $a13607449821398607916) && ip4.dst == $a14918748166599097711", - Action: nbdb.LogicalRouterPolicyActionAllow, - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, - UUID: "no-reroute-node-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "(ip4.src == $a8519615025667110816 || ip4.src == $a13607449821398607916) && ip4.dst == $a712973235162149816", + Action: nbdb.LogicalRouterPolicyActionAllow, + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + UUID: "no-reroute-node-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.NAT{ - UUID: "egressip-nat-UUID", - LogicalIP: podV4IP, - ExternalIP: egressIP, - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID", + LogicalIP: podV4IP, + ExternalIP: egressIP, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort, Options: map[string]string{ @@ -1241,7 +1233,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"reroute-UUID", "default-no-reroute-UUID", "no-reroute-service-UUID", - "no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + "no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouterPort{ UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node2.Name + "-UUID", @@ -1303,11 +1295,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.ExternalSwitchPrefix + node3Name, Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node3Name + "-UUID"}, }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), node1Switch, node2Switch, node3Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, 
types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -1499,11 +1491,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1, node2, node3}, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2, node3}, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }) err := fakeOvn.controller.WatchEgressIPNamespaces() @@ -1577,11 +1569,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) node2MgntIP, err := getSwitchManagementPortIP(&node2) gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} - node2Switch.QOSRules = []string{"egressip-QoS-UUID"} - node3Switch.QOSRules = []string{"egressip-QoS-UUID"} - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}) + node1Switch.QOSRules = []string{"default-QoS-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID"} + node3Switch.QOSRules = []string{"default-QoS-UUID"} + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) ipNets, _ := util.ParseIPNets(append(node2IPv4Addresses, node3IPv4Addresses...)) egressNodeIPs := []string{} for _, ipNet := range ipNets { @@ -1590,34 +1582,35 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets(egressNodeIPs) expectedDatabaseState := []libovsdbtest.TestData{ &nbdb.LogicalRouterPolicy{ - Priority: types.EgressIPReroutePriority, - Match: fmt.Sprintf("ip4.src == %s", egressPod.Status.PodIP), - Action: nbdb.LogicalRouterPolicyActionReroute, - Nexthops: []string{node2MgntIP.To4().String()}, - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, - UUID: "reroute-UUID", + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: []string{node2MgntIP.To4().String()}, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID", }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: 
nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "(ip4.src == $a4548040316634674295 || ip4.src == $a13607449821398607916) && ip4.dst == $a14918748166599097711", - Action: nbdb.LogicalRouterPolicyActionAllow, - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, - UUID: "no-reroute-node-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "(ip4.src == $a8519615025667110816 || ip4.src == $a13607449821398607916) && ip4.dst == $a712973235162149816", + Action: nbdb.LogicalRouterPolicyActionAllow, + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + UUID: "no-reroute-node-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -1638,7 +1631,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", - Policies: []string{"reroute-UUID", "default-no-reroute-UUID", "no-reroute-service-UUID", "no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + Policies: []string{"reroute-UUID", "default-no-reroute-UUID", "no-reroute-service-UUID", "no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouterPort{ UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node2.Name + "-UUID", @@ -1718,7 +1711,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { node1Switch, node2Switch, node3Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -1876,8 +1869,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1, node2}, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, }) i, n, _ := net.ParseCIDR(podV4IP + "/23") @@ -1931,28 +1924,32 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { reroutePolicyNextHop = []string{"100.88.0.3"} // node2's transit switch portIP } - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) expectedDatabaseState := []libovsdbtest.TestData{ - getReRoutePolicy(egressPod.Status.PodIP, "4", "reroute-UUID", reroutePolicyNextHop, eipExternalID), + getReRoutePolicy(egressPod.Status.PodIP, "4", "reroute-UUID", reroutePolicyNextHop, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), &nbdb.LogicalRouterPolicy{ - 
Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "(ip4.src == $a4548040316634674295 || ip4.src == $a13607449821398607916) && ip4.dst == $a14918748166599097711", - Action: nbdb.LogicalRouterPolicyActionAllow, - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, - UUID: "no-reroute-node-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "(ip4.src == $a8519615025667110816 || ip4.src == $a13607449821398607916) && ip4.dst == $a712973235162149816", + Action: nbdb.LogicalRouterPolicyActionAllow, + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + UUID: "no-reroute-node-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -1967,7 +1964,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", - Policies: []string{"reroute-UUID", "no-reroute-node-UUID", "default-no-reroute-UUID", "no-reroute-service-UUID", "egressip-no-reroute-reply-traffic"}, + Policies: []string{"reroute-UUID", "no-reroute-node-UUID", "default-no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouterPort{ UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID", @@ -2019,20 +2016,20 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.ExternalSwitchPrefix + node2Name, Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "-UUID"}, }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), node1Switch, node2Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, } if node2Zone != "remote" { // add qosrules only if node is in local zone - node2Switch.QOSRules = []string{"egressip-QoS-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID"} } if node1Zone == "global" { // QoS Rule is configured only for nodes in local zones, the master of the remote zone will do it for the remote nodes - 
node1Switch.QOSRules = []string{"egressip-QoS-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID"} ipNets, _ := util.ParseIPNets(append(node1IPv4Addresses, node2IPv4Addresses...)) egressNodeIPs := []string{} for _, ipNet := range ipNets { @@ -2042,7 +2039,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { expectedDatabaseState = append(expectedDatabaseState, egressNodeIPsASv4) } if node1Zone == "remote" { - expectedDatabaseState = append(expectedDatabaseState, getReRoutePolicy(egressPod.Status.PodIP, "4", "remote-reroute-UUID", reroutePolicyNextHop, eipExternalID)) + expectedDatabaseState = append(expectedDatabaseState, getReRoutePolicy(egressPod.Status.PodIP, "4", "remote-reroute-UUID", reroutePolicyNextHop, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs())) expectedDatabaseState[6].(*nbdb.LogicalRouter).Policies = expectedDatabaseState[6].(*nbdb.LogicalRouter).Policies[1:] // remove LRP ref expectedDatabaseState[6].(*nbdb.LogicalRouter).Policies = append(expectedDatabaseState[6].(*nbdb.LogicalRouter).Policies, "remote-reroute-UUID") // remove LRP ref expectedDatabaseState = expectedDatabaseState[1:] // remove LRP @@ -2232,18 +2230,18 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIPOVN, eIPSecondaryHost}, }, - &v1.NodeList{ - Items: []v1.Node{node1, node2}, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace, *egressNamespace2}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace, *egressNamespace2}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod1Node1, egressPod2Node1, egressPod3Node2, egressPod4Node2}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod1Node1, egressPod2Node1, egressPod3Node2, egressPod4Node2}, }) for _, p := range []struct { - v1.Pod + corev1.Pod podIP string }{ { @@ -2306,37 +2304,50 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { lrps := make([]*nbdb.LogicalRouterPolicy, 0) if !interconnect { - lrps = append(lrps, getReRoutePolicy(egressPod1Node1.Status.PodIP, "4", "reroute-UUID", egressPod1Node1Reroute, eipExternalID), - getReRoutePolicy(egressPod2Node1.Status.PodIP, "4", "reroute-UUID2", egressPod2Node1Reroute, eip2ExternalID), - getReRoutePolicy(egressPod3Node2.Status.PodIP, "4", "reroute-UUID3", egressPod3Node2Reroute, eipExternalID), - getReRoutePolicy(egressPod4Node2.Status.PodIP, "4", "reroute-UUID4", egressPod4Node2Reroute, eip2ExternalID)) + lrps = append(lrps, getReRoutePolicy(egressPod1Node1.Status.PodIP, "4", "reroute-UUID", egressPod1Node1Reroute, + getEgressIPLRPReRouteDbIDs(eIPOVN.Name, egressPod1Node1.Namespace, egressPod1Node1.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), + getReRoutePolicy(egressPod2Node1.Status.PodIP, "4", "reroute-UUID2", egressPod2Node1Reroute, + getEgressIPLRPReRouteDbIDs(eIPSecondaryHost.Name, egressPod2Node1.Namespace, egressPod2Node1.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), + getReRoutePolicy(egressPod3Node2.Status.PodIP, "4", "reroute-UUID3", egressPod3Node2Reroute, + getEgressIPLRPReRouteDbIDs(eIPOVN.Name, egressPod3Node2.Namespace, egressPod3Node2.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), + 
getReRoutePolicy(egressPod4Node2.Status.PodIP, "4", "reroute-UUID4", egressPod4Node2Reroute, + getEgressIPLRPReRouteDbIDs(eIPSecondaryHost.Name, egressPod4Node2.Namespace, egressPod4Node2.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs())) } if interconnect && node1Zone == "global" && node2Zone == "global" { - lrps = append(lrps, getReRoutePolicy(egressPod1Node1.Status.PodIP, "4", "reroute-UUID", egressPod1Node1Reroute, eipExternalID), - getReRoutePolicy(egressPod2Node1.Status.PodIP, "4", "reroute-UUID2", egressPod2Node1Reroute, eip2ExternalID), - getReRoutePolicy(egressPod3Node2.Status.PodIP, "4", "reroute-UUID3", egressPod3Node2Reroute, eipExternalID), - getReRoutePolicy(egressPod4Node2.Status.PodIP, "4", "reroute-UUID4", egressPod4Node2Reroute, eip2ExternalID)) + lrps = append(lrps, getReRoutePolicy(egressPod1Node1.Status.PodIP, "4", "reroute-UUID", egressPod1Node1Reroute, + getEgressIPLRPReRouteDbIDs(eIPOVN.Name, egressPod1Node1.Namespace, egressPod1Node1.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), + getReRoutePolicy(egressPod2Node1.Status.PodIP, "4", "reroute-UUID2", egressPod2Node1Reroute, + getEgressIPLRPReRouteDbIDs(eIPSecondaryHost.Name, egressPod2Node1.Namespace, egressPod2Node1.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), + getReRoutePolicy(egressPod3Node2.Status.PodIP, "4", "reroute-UUID3", egressPod3Node2Reroute, + getEgressIPLRPReRouteDbIDs(eIPOVN.Name, egressPod3Node2.Namespace, egressPod3Node2.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), + getReRoutePolicy(egressPod4Node2.Status.PodIP, "4", "reroute-UUID4", egressPod4Node2Reroute, + getEgressIPLRPReRouteDbIDs(eIPSecondaryHost.Name, egressPod4Node2.Namespace, egressPod4Node2.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs())) } if interconnect && node1Zone == "global" && node2Zone == "remote" { - lrps = append(lrps, getReRoutePolicy(egressPod1Node1.Status.PodIP, "4", "reroute-UUID", egressPod1Node1Reroute, eipExternalID), - getReRoutePolicy(egressPod2Node1.Status.PodIP, "4", "reroute-UUID2", egressPod2Node1Reroute, eip2ExternalID), - getReRoutePolicy(podV4IP4, "4", "egressip-pod4node2", egressPod4Node2Reroute, eip2ExternalID)) + lrps = append(lrps, getReRoutePolicy(egressPod1Node1.Status.PodIP, "4", "reroute-UUID", egressPod1Node1Reroute, + getEgressIPLRPReRouteDbIDs(eIPOVN.Name, egressPod1Node1.Namespace, egressPod1Node1.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), + getReRoutePolicy(egressPod2Node1.Status.PodIP, "4", "reroute-UUID2", egressPod2Node1Reroute, + getEgressIPLRPReRouteDbIDs(eIPSecondaryHost.Name, egressPod2Node1.Namespace, egressPod2Node1.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), + getReRoutePolicy(podV4IP4, "4", "egressip-pod4node2", egressPod4Node2Reroute, + getEgressIPLRPReRouteDbIDs(eIPSecondaryHost.Name, egressPod4Node2.Namespace, egressPod4Node2.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs())) } if interconnect && node1Zone == "remote" && node2Zone == "global" { lrps = append(lrps, - getReRoutePolicy(egressPod3Node2.Status.PodIP, "4", "reroute-UUID", egressPod3Node2Reroute, eipExternalID), - 
getReRoutePolicy(egressPod4Node2.Status.PodIP, "4", "reroute-UUID2", egressPod4Node2Reroute, eip2ExternalID)) + getReRoutePolicy(egressPod3Node2.Status.PodIP, "4", "reroute-UUID", egressPod3Node2Reroute, + getEgressIPLRPReRouteDbIDs(eIPOVN.Name, egressPod3Node2.Namespace, egressPod3Node2.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), + getReRoutePolicy(egressPod4Node2.Status.PodIP, "4", "reroute-UUID2", egressPod4Node2Reroute, + getEgressIPLRPReRouteDbIDs(eIPSecondaryHost.Name, egressPod4Node2.Namespace, egressPod4Node2.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs())) } - ovnCRPolicies := []string{"no-reroute-node-UUID", "default-no-reroute-UUID", "no-reroute-service-UUID", "egressip-no-reroute-reply-traffic"} + ovnCRPolicies := []string{"no-reroute-node-UUID", "default-no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-reply-traffic"} for _, lrp := range lrps { ovnCRPolicies = append(ovnCRPolicies, lrp.UUID) } nodeName := "k8s-node1" - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP, podV4IP2, podV4IP3, podV4IP4}) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP, podV4IP2, podV4IP3, podV4IP4}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) ipNets, _ := util.ParseIPNets(append(node1IPv4Addresses, node2IPv4Addresses...)) egressNodeIPs := []string{} @@ -2346,23 +2357,26 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets(egressNodeIPs) expectedDatabaseState := []libovsdbtest.TestData{ &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "(ip4.src == $a4548040316634674295 || ip4.src == $a13607449821398607916) && ip4.dst == $a14918748166599097711", - Action: nbdb.LogicalRouterPolicyActionAllow, - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, - UUID: "no-reroute-node-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "(ip4.src == $a8519615025667110816 || ip4.src == $a13607449821398607916) && ip4.dst == $a712973235162149816", + Action: 
nbdb.LogicalRouterPolicyActionAllow, + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + UUID: "no-reroute-node-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -2431,37 +2445,33 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.ExternalSwitchPrefix + node2Name, Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "-UUID"}, }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), node1Switch, node2Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, } if node1Zone != "remote" { // QoS rules is configured only for nodes in local zones, the master of the remote zone will do it for the remote nodes - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID"} expectedDatabaseState[3].(*nbdb.LogicalRouter).Nat = append(expectedDatabaseState[3].(*nbdb.LogicalRouter).Nat, "egressip-nat-UUID", "egressip2-nat-UUID") expectedDatabaseState = append(expectedDatabaseState, &nbdb.NAT{ - UUID: "egressip-nat-UUID", - LogicalIP: podV4IP, - ExternalIP: egressIPOVN, - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID", + LogicalIP: podV4IP, + ExternalIP: egressIPOVN, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod1Node1.Namespace, egressPod1Node1.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &nodeName, Options: map[string]string{ "stateless": "false", }, }, &nbdb.NAT{ - UUID: "egressip2-nat-UUID", - LogicalIP: podV4IP3, - ExternalIP: egressIPOVN, - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip2-nat-UUID", + LogicalIP: podV4IP3, + ExternalIP: egressIPOVN, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod3Node2.Namespace, egressPod3Node2.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &nodeName, Options: map[string]string{ @@ -2472,7 +2482,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { if node2Zone != "remote" { // add QoS rules config only if node is in local zone - node2Switch.QOSRules = []string{"egressip-QoS-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID"} } for _, lrp := range lrps { @@ -2648,11 +2658,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1, node2}, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }) err := fakeOvn.controller.WatchEgressIPNamespaces() @@ -2684,10 +2694,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) expectedNatLogicalPort := "k8s-node1" - primarySNAT := getEIPSNAT(podV4IP, egressIP, expectedNatLogicalPort) + primarySNAT := getEIPSNAT(podV4IP, egressPod.Namespace, egressPod.Name, egressIP, expectedNatLogicalPort, DefaultNetworkControllerName) 
primarySNAT.UUID = "egressip-nat1-UUID" - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) ipNets, _ := util.ParseIPNets([]string{node1IPv4, node2IPv4}) egressNodeIPs := []string{} @@ -2698,23 +2708,26 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { expectedDatabaseState := []libovsdbtest.TestData{ primarySNAT, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "(ip4.src == $a4548040316634674295 || ip4.src == $a13607449821398607916) && ip4.dst == $a14918748166599097711", - Action: nbdb.LogicalRouterPolicyActionAllow, - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, - UUID: "no-reroute-node-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "(ip4.src == $a8519615025667110816 || ip4.src == $a13607449821398607916) && ip4.dst == $a712973235162149816", + Action: nbdb.LogicalRouterPolicyActionAllow, + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + UUID: "no-reroute-node-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -2731,7 +2744,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", - Policies: []string{"default-no-reroute-UUID", "no-reroute-service-UUID", "no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + Policies: []string{"default-no-reroute-UUID", "no-reroute-service-UUID", "no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouterPort{ UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID", @@ -2787,27 +2800,28 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: "k8s-" + node2.Name, Addresses: []string{"fe:1a:c2:3f:0e:fb " + util.GetNodeManagementIfAddr(node2Subnet).IP.String()}, }, - 
getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), node1Switch, node2Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, } if node2Zone != "remote" { // add QoS rules only if node is in local zone - node2Switch.QOSRules = []string{"egressip-QoS-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID"} } if node1Zone != "remote" { - expectedDatabaseState = append(expectedDatabaseState, getReRoutePolicy(egressPod.Status.PodIP, "4", "reroute-UUID", nodeLogicalRouterIPv4, eipExternalID)) + expectedDatabaseState = append(expectedDatabaseState, getReRoutePolicy(egressPod.Status.PodIP, "4", "reroute-UUID", + nodeLogicalRouterIPv4, getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs())) expectedDatabaseState[6].(*nbdb.LogicalRouter).Policies = append(expectedDatabaseState[6].(*nbdb.LogicalRouter).Policies, "reroute-UUID") - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID"} } else { // if node1 where the pod lives is remote we can't see the EIP setup done since master belongs to local zone expectedDatabaseState[4].(*nbdb.LogicalRouter).Nat = []string{} expectedDatabaseState[6].(*nbdb.LogicalRouter).Policies = []string{"no-reroute-node-UUID", "default-no-reroute-UUID", - "no-reroute-service-UUID", "egressip-no-reroute-reply-traffic"} + "no-reroute-service-UUID", "default-no-reroute-reply-traffic"} expectedDatabaseState = expectedDatabaseState[1:] } gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) @@ -2827,8 +2841,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { return ok }).Should(gomega.BeFalse()) - // W0608 12:53:33.728205 1161455 egressip.go:2030] Unable to retrieve gateway IP for node: node1, protocol is IPv6: false, err: attempt at finding node gateway router network information failed, err: unable to find router port rtoj-GR_node1: object not found - // 2023-04-25T11:01:13.2804834Z W0425 11:01:13.280407 21055 egressip.go:2036] Unable to fetch transit switch IP for node: node1: err: failed to get node node1: node "node1" not found + // W0608 12:53:33.728205 1161455 base_network_controller_egressip.go:2030] Unable to retrieve gateway IP for node: node1, protocol is IPv6: false, err: attempt at finding node gateway router network information failed, err: unable to find router port rtoj-GR_node1: object not found + // 2023-04-25T11:01:13.2804834Z W0425 11:01:13.280407 21055 base_network_controller_egressip.go:2036] Unable to fetch transit switch IP for node: node1: err: failed to get node node1: node "node1" not found fakeOvn.patchEgressIPObj(node2Name, egressIPName, egressIP, node2IPv4Net) gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(1)) gomega.Eventually(nodeSwitch).Should(gomega.Equal(node2.Name)) // egressIP successfully reassigned to node2 @@ -2836,32 +2850,36 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { gomega.Expect(egressIPs[0]).To(gomega.Equal(egressIP)) expectedNatLogicalPort = "k8s-node2" - eipSNAT := getEIPSNAT(podV4IP, egressIP, expectedNatLogicalPort) - egressSVCServedPodsASv4, _ = buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ = 
buildEgressIPServedPodsAddressSets([]string{podV4IP}) + eipSNAT := getEIPSNAT(podV4IP, egressPod.Namespace, egressPod.Name, egressIP, expectedNatLogicalPort, DefaultNetworkControllerName) + egressSVCServedPodsASv4, _ = buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ = buildEgressIPServedPodsAddressSets([]string{podV4IP}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) ip, _, _ := net.ParseCIDR(node2IPv4) egressNodeIPsASv4, _ = buildEgressIPNodeAddressSets([]string{ip.String()}) expectedDatabaseState = []libovsdbtest.TestData{ - getReRoutePolicy(egressPod.Status.PodIP, "4", "reroute-UUID", node2LogicalRouterIPv4, eipExternalID), + getReRoutePolicy(egressPod.Status.PodIP, "4", "reroute-UUID", node2LogicalRouterIPv4, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "(ip4.src == $a4548040316634674295 || ip4.src == $a13607449821398607916) && ip4.dst == $a14918748166599097711", - Action: nbdb.LogicalRouterPolicyActionAllow, - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, - UUID: "no-reroute-node-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "(ip4.src == $a8519615025667110816 || ip4.src == $a13607449821398607916) && ip4.dst == $a712973235162149816", + Action: nbdb.LogicalRouterPolicyActionAllow, + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + UUID: "no-reroute-node-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node2.Name, @@ -2907,10 +2925,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: "k8s-" + node2.Name, Addresses: []string{"fe:1a:c2:3f:0e:fb " + util.GetNodeManagementIfAddr(node2Subnet).IP.String()}, }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), node1Switch, node2Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, 
egressIPServedPodsASv4, egressNodeIPsASv4, @@ -2923,7 +2941,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { // all cases: reroute logical router policy is gone and won't be recreated since node1 is deleted - that is where the pod lives // NOTE: This test is not really a real scenario, it depicts a transient state. expectedDatabaseState[5].(*nbdb.LogicalRouter).Policies = []string{"default-no-reroute-UUID", "no-reroute-service-UUID", - "no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"} + "no-reroute-node-UUID", "default-no-reroute-reply-traffic"} expectedDatabaseState = expectedDatabaseState[1:] gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveDataIgnoringUUIDs(expectedDatabaseState)) @@ -3077,8 +3095,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1, node2}, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, }) i, n, _ := net.ParseCIDR(podV4IP + "/23") @@ -3129,8 +3147,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { if interconnect && node1Zone != node2Zone && node2Zone == "remote" { reroutePolicyNextHop = []string{"100.88.0.3"} // node2's transit switch portIP } - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) ipNets, _ := util.ParseIPNets(append(node1IPv4Addresses, node2IPv4OVN)) egressNodeIPs := []string{} for _, ipNet := range ipNets { @@ -3138,25 +3156,29 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { } egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets(egressNodeIPs) expectedDatabaseState := []libovsdbtest.TestData{ - getReRoutePolicy(egressPod.Status.PodIP, "4", "reroute-UUID", reroutePolicyNextHop, eipExternalID), + getReRoutePolicy(egressPod.Status.PodIP, "4", "reroute-UUID", reroutePolicyNextHop, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "(ip4.src == $a4548040316634674295 || ip4.src == $a13607449821398607916) && ip4.dst == $a14918748166599097711", - Action: nbdb.LogicalRouterPolicyActionAllow, - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, - UUID: "no-reroute-node-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "(ip4.src == $a8519615025667110816 || ip4.src == $a13607449821398607916) && ip4.dst == $a712973235162149816", + Action: nbdb.LogicalRouterPolicyActionAllow, + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + UUID: "no-reroute-node-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: 
nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -3172,7 +3194,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"reroute-UUID", "default-no-reroute-UUID", "no-reroute-service-UUID", - "no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + "no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouterPort{ UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID", @@ -3224,10 +3246,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.ExternalSwitchPrefix + node2Name, Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "-UUID"}, }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), node1Switch, node2Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -3235,14 +3257,15 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { if node1Zone == "global" { // QoS Rule is configured only for nodes in local zones, the master of the remote zone will do it for the remote nodes - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID"} } if node2Zone != "remote" { // add QoS config only if node is in local zone - node2Switch.QOSRules = []string{"egressip-QoS-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID"} } if node1Zone == "remote" { - podPolicy := getReRoutePolicy(podV4IP, "4", "static-reroute-UUID", []string{node2MgntIP.To4().String()}, eipExternalID) + podPolicy := getReRoutePolicy(podV4IP, "4", "static-reroute-UUID", []string{node2MgntIP.To4().String()}, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()) expectedDatabaseState = append(expectedDatabaseState, podPolicy) expectedDatabaseState[6].(*nbdb.LogicalRouter).Policies = append(expectedDatabaseState[6].(*nbdb.LogicalRouter).Policies, "static-reroute-UUID") expectedDatabaseState[6].(*nbdb.LogicalRouter).Policies = expectedDatabaseState[6].(*nbdb.LogicalRouter).Policies[1:] // remove ref to LRP since static route is routing the pod @@ -3273,6 +3296,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { ginkgotable.DescribeTable("should remove OVN pod egress setup when EgressIP stops matching pod label", 
func(interconnect, isnode1Local, isnode2Local bool) { config.OVNKubernetesFeature.EnableInterconnect = interconnect + config.IPv6Mode = true app.Action = func(ctx *cli.Context) error { egressIP := net.ParseIP("0:0:0:0:0:feff:c0a8:8e0d") @@ -3368,14 +3392,14 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, }, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, }, - &v1.NodeList{ - Items: []v1.Node{node2}, + &corev1.NodeList{ + Items: []corev1.Node{node2}, }, ) @@ -3395,7 +3419,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, }, } - i, n, _ := net.ParseCIDR(podV6IP + "/23") n.IP = i fakeOvn.controller.logicalPortCache.add(&egressPod, "", types.DefaultNetworkName, "", nil, []*net.IPNet{n}) @@ -3424,10 +3447,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { gomega.Eventually(getEgressIPStatusLen(eIP.Name)).Should(gomega.Equal(1)) expectedNatLogicalPort := "k8s-node2" - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil) expectedDatabaseState := []libovsdbtest.TestData{ - getReRoutePolicy(egressPod.Status.PodIP, "6", "reroute-UUID", node2LogicalRouterIPv6, eipExternalID), - getEIPSNAT(podV6IP, egressIP.String(), expectedNatLogicalPort), + getReRoutePolicy(egressPod.Status.PodIP, "6", "reroute-UUID", node2LogicalRouterIPv6, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV6, + types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), + getEIPSNAT(podV6IP, egressPod.Namespace, egressPod.Name, egressIP.String(), expectedNatLogicalPort, DefaultNetworkControllerName), &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", @@ -3476,7 +3500,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: node2.Name, Ports: []string{"k8s-" + node2.Name + "-UUID"}, }, - egressIPServedPodsASv4, } if !isnode2Local { @@ -3484,7 +3507,9 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { expectedDatabaseState[6].(*nbdb.LogicalRouter).Nat = []string{} expectedDatabaseState = expectedDatabaseState[2:] // add policy with nextHop towards egressNode's transit switchIP - expectedDatabaseState = append(expectedDatabaseState, getReRoutePolicy(egressPod.Status.PodIP, "6", "reroute-UUID", []string{"fd97::3"}, eipExternalID)) + expectedDatabaseState = append(expectedDatabaseState, getReRoutePolicy(egressPod.Status.PodIP, + "6", "reroute-UUID", []string{"fd97::3"}, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV6, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs())) } if !isnode1Local { expectedDatabaseState[2].(*nbdb.LogicalRouter).Policies = []string{} @@ -3501,7 +3526,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(egressPod.Namespace).Update(context.TODO(), podUpdate, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Eventually(getEgressIPStatusLen(eIP.Name)).Should(gomega.Equal(1)) - expectedDatabaseState = []libovsdbtest.TestData{ &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, @@ -3551,7 +3575,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: node2.Name, Ports: []string{"k8s-" + node2.Name + 
"-UUID"}, }, - egressIPServedPodsASv4, } gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) @@ -3635,13 +3658,13 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, }, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, }, - &v1.NodeList{Items: []v1.Node{node1, node2}}, + &corev1.NodeList{Items: []corev1.Node{node1, node2}}, ) eIP := egressipv1.EgressIP{ ObjectMeta: newEgressIPMeta(egressIPName), @@ -3685,10 +3708,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { gomega.Eventually(getEgressIPStatusLen(eIP.Name)).Should(gomega.Equal(1)) expectedNatLogicalPort := "k8s-node2" - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil) expectedDatabaseState := []libovsdbtest.TestData{ - getReRoutePolicy(egressPod.Status.PodIP, "6", "reroute-UUID", node2LogicalRouterIPv6, eipExternalID), - getEIPSNAT(podV6IP, egressIP.String(), expectedNatLogicalPort), + getReRoutePolicy(egressPod.Status.PodIP, "6", "reroute-UUID", node2LogicalRouterIPv6, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV6, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), + getEIPSNAT(podV6IP, egressPod.Namespace, egressPod.Name, egressIP.String(), expectedNatLogicalPort, DefaultNetworkControllerName), &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", @@ -3735,7 +3758,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: node2.Name, Ports: []string{"k8s-" + node2.Name + "-UUID"}, }, - egressIPServedPodsASv4, } if podZone == "remote" { expectedDatabaseState[2].(*nbdb.LogicalRouter).Policies = []string{} @@ -3762,15 +3784,17 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { var key string key, err = retry.GetResourceKey(podUpdate) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - ginkgo.By("retry entry: new obj should not be nil, config should not be nil") - retry.CheckRetryObjectMultipleFieldsEventually( - key, - fakeOvn.controller.retryEgressIPPods, - gomega.BeNil(), // oldObj should be nil - gomega.Not(gomega.BeNil()), // newObj should not be nil - gomega.Not(gomega.BeNil()), // config should not be nil - ) + // no retry is expected if the pod is remote and the node isn't an egress node + if podZone != "remote" { + ginkgo.By("retry entry: new obj should not be nil, config should not be nil") + retry.CheckRetryObjectMultipleFieldsEventually( + key, + fakeOvn.controller.retryEgressIPPods, + gomega.BeNil(), // oldObj should be nil + gomega.Not(gomega.BeNil()), // newObj should not be nil + gomega.Not(gomega.BeNil()), // config should not be nil + ) + } connCtx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) defer cancel() @@ -3845,13 +3869,13 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, }, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, }, - &v1.NodeList{Items: []v1.Node{*newNodeGlobalZoneNotEgressableV6Only(node2Name, "0:0:0:0:0:feff:c0a8:8e0c/64"), + &corev1.NodeList{Items: []corev1.Node{*newNodeGlobalZoneNotEgressableV6Only(node2Name, 
"0:0:0:0:0:feff:c0a8:8e0c/64"), *newNodeGlobalZoneNotEgressableV6Only(node1Name, "0:0:0:0:0:fedf:c0a8:8e0c/64")}}, ) @@ -3893,17 +3917,14 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { gomega.Eventually(getEgressIPStatusLen(eIP.Name)).Should(gomega.Equal(1)) expectedNatLogicalPort := "k8s-node2" - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil) expectedDatabaseState := []libovsdbtest.TestData{ &nbdb.LogicalRouterPolicy{ - Priority: types.EgressIPReroutePriority, - Match: fmt.Sprintf("ip6.src == %s", egressPod.Status.PodIP), - Action: nbdb.LogicalRouterPolicyActionReroute, - Nexthops: nodeLogicalRouterIPv6, - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, - UUID: "reroute-UUID", + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip6.src == %s", egressPod.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: nodeLogicalRouterIPv6, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV6, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID", }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, @@ -3916,12 +3937,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Networks: []string{nodeLogicalRouterIfAddrV6}, }, &nbdb.NAT{ - UUID: "egressip-nat-UUID", - LogicalIP: podV6IP, - ExternalIP: egressIP.String(), - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID", + LogicalIP: podV6IP, + ExternalIP: egressIP.String(), + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod.Namespace, egressPod.Name, IPFamilyValueV6, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort, Options: map[string]string{ @@ -3958,7 +3977,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: node2Name, Ports: []string{"k8s-" + node2Name + "-UUID"}, }, - egressIPServedPodsASv4, } gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) @@ -4002,34 +4020,34 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { vipIPv6CIDR := vipIPv6 + "/64" _, node2Subnet, _ := net.ParseCIDR(v6Node2Subnet) - node1 := v1.Node{ + node1 := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: node1Name, Annotations: map[string]string{ util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\", \"%s\", \"%s\", \"%s\"]", node1IPv4CIDR, node1IPv6CIDR, vipIPv4CIDR, vipIPv6CIDR), }, }, - Status: v1.NodeStatus{ - Conditions: []v1.NodeCondition{ + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ { - Type: v1.NodeReady, - Status: v1.ConditionTrue, + Type: corev1.NodeReady, + Status: corev1.ConditionTrue, }, }, }, } - node2 := v1.Node{ + node2 := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: node2Name, Annotations: map[string]string{ util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\", \"%s\"]", node2IPv4CIDR, node2IPv6CIDR), }, }, - Status: v1.NodeStatus{ - Conditions: []v1.NodeCondition{ + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ { - Type: v1.NodeReady, - Status: v1.ConditionTrue, + Type: corev1.NodeReady, + Status: corev1.ConditionTrue, }, }, }, @@ -4060,49 +4078,53 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { node2Switch, }, }, - &v1.NodeList{ - Items: []v1.Node{node1, node2}, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, }, ) err := fakeOvn.controller.WatchEgressNodes() gomega.Expect(err).NotTo(gomega.HaveOccurred()) 
- egressSVCServedPodsASv4, egressSVCServedPodsASv6 := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, egressIPServedPodsASv6 := buildEgressIPServedPodsAddressSets(nil) + egressSVCServedPodsASv4, egressSVCServedPodsASv6 := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, egressIPServedPodsASv6 := buildEgressIPServedPodsAddressSets(nil, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) egressNodeIPsASv4, egressNodeIPsASv6 := buildEgressIPNodeAddressSets([]string{node1IPv4, vipIPv4, node2IPv4, node1IPv6, vipIPv6, node2IPv6}) - node1Switch.QOSRules = []string{"egressip-QoS-UUID", "egressip-QoSv6-UUID"} - node2Switch.QOSRules = []string{"egressip-QoS-UUID", "egressip-QoSv6-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID", "default-QoSv6-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID", "default-QoSv6-UUID"} expectedDatabaseState := []libovsdbtest.TestData{ &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip6.src == $%s || ip6.src == $%s) && ip6.dst == $%s", egressIPServedPodsASv6.Name, egressSVCServedPodsASv6.Name, egressNodeIPsASv6.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-v6-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-v6-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: 
getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV6, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, @@ -4112,7 +4134,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { "no-reroute-service-UUID", "default-no-reroute-node-UUID", "default-v6-no-reroute-node-UUID", - "egressip-no-reroute-reply-traffic", + "default-no-reroute-reply-traffic", }, }, &nbdb.LogicalSwitchPort{ @@ -4122,8 +4144,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, node1Switch, node2Switch, - getDefaultQoSRule(false), - getDefaultQoSRule(true), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), + getDefaultQoSRule(true, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressSVCServedPodsASv6, egressIPServedPodsASv4, egressIPServedPodsASv6, egressNodeIPsASv4, egressNodeIPsASv6, } @@ -4206,13 +4228,13 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, }, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, }, - &v1.NodeList{Items: []v1.Node{*newNodeGlobalZoneNotEgressableV6Only(node2Name, "0:0:0:0:0:feff:c0a8:8e0c/64"), + &corev1.NodeList{Items: []corev1.Node{*newNodeGlobalZoneNotEgressableV6Only(node2Name, "0:0:0:0:0:feff:c0a8:8e0c/64"), *newNodeGlobalZoneNotEgressableV6Only(node1Name, "0:0:0:0:0:fedf:c0a8:8e0c/64")}}, ) @@ -4270,10 +4292,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { gomega.Eventually(getEgressIPStatusLen(eIP.Name)).Should(gomega.Equal(1)) expectedNatLogicalPort := "k8s-node2" - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil) expectedDatabaseState := []libovsdbtest.TestData{ - getReRoutePolicy(podV6IP, "6", "reroute-UUID", node2LogicalRouterIPv6, eipExternalID), - getEIPSNAT(podV6IP, egressIP.String(), expectedNatLogicalPort), + getReRoutePolicy(podV6IP, "6", "reroute-UUID", node2LogicalRouterIPv6, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV6, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), + getEIPSNAT(podV6IP, egressPod.Namespace, egressPod.Name, egressIP.String(), expectedNatLogicalPort, DefaultNetworkControllerName), &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", @@ -4314,7 +4336,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: node2Name, Ports: []string{"k8s-" + node2Name + "-UUID"}, }, - egressIPServedPodsASv4, } if podZone == "remote" { expectedDatabaseState[2].(*nbdb.LogicalRouter).Policies = []string{} @@ -4340,13 +4361,13 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { egressPod := *newPodWithLabels(eipNamespace, podName, node1Name, "", egressPodLabel) egressNamespace := newNamespace(eipNamespace) fakeOvn.startWithDBSetup(clusterRouterDbSetup, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, }, - &v1.NodeList{Items: []v1.Node{*newNodeGlobalZoneNotEgressableV4Only(node2Name, "0:0:0:0:0:feff:c0a8:8e0c/64"), + &corev1.NodeList{Items: 
[]corev1.Node{*newNodeGlobalZoneNotEgressableV4Only(node2Name, "0:0:0:0:0:feff:c0a8:8e0c/64"), *newNodeGlobalZoneNotEgressableV4Only(node1Name, "0:0:0:0:0:fedf:c0a8:8e0c/64")}}, ) eIP := egressipv1.EgressIP{ @@ -4458,13 +4479,13 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, }, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, }, - &v1.NodeList{Items: []v1.Node{*node1, + &corev1.NodeList{Items: []corev1.Node{*node1, *newNodeGlobalZoneNotEgressableV6Only(node2Name, "0:0:0:0:0:fedf:c0a8:8e0c/64")}}, ) @@ -4508,10 +4529,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { gomega.Eventually(getEgressIPStatusLen(eIP.Name)).Should(gomega.Equal(1)) expectedNatLogicalPort := "k8s-node2" - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil) expectedDatabaseState := []libovsdbtest.TestData{ - getReRoutePolicy(egressPod.Status.PodIP, "6", "reroute-UUID", node2LogicalRouterIPv6, eipExternalID), - getEIPSNAT(podV6IP, egressIP.String(), expectedNatLogicalPort), + getReRoutePolicy(egressPod.Status.PodIP, "6", "reroute-UUID", node2LogicalRouterIPv6, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV6, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), + getEIPSNAT(podV6IP, egressPod.Namespace, egressPod.Name, egressIP.String(), expectedNatLogicalPort, DefaultNetworkControllerName), &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", @@ -4552,7 +4573,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: node2Name, Ports: []string{"k8s-" + node2Name + "-UUID"}, }, - egressIPServedPodsASv4, } if podZone == "remote" { // egressNode is in different zone than pod and egressNode is in local zone, so static reroute will be visible @@ -4609,7 +4629,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: node2Name, Ports: []string{"k8s-" + node2Name + "-UUID"}, }, - egressIPServedPodsASv4, } gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) return nil @@ -4684,13 +4703,13 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, }, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, }, - &v1.NodeList{Items: []v1.Node{*node1, *newNodeGlobalZoneNotEgressableV6Only(node2Name, "0:0:0:0:0:feff:c0a8:8e0c/64")}}, + &corev1.NodeList{Items: []corev1.Node{*node1, *newNodeGlobalZoneNotEgressableV6Only(node2Name, "0:0:0:0:0:feff:c0a8:8e0c/64")}}, ) eIP := egressipv1.EgressIP{ ObjectMeta: newEgressIPMeta(egressIPName), @@ -4755,10 +4774,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { gomega.Eventually(getEgressIPStatusLen(eIP.Name)).Should(gomega.Equal(1)) expectedNatLogicalPort := "k8s-node2" - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil) expectedDatabaseState := []libovsdbtest.TestData{ - getReRoutePolicy(egressPod.Status.PodIP, "6", "reroute-UUID", nodeLogicalRouterIPv6, eipExternalID), - getEIPSNAT(podV6IP, egressIP.String(), expectedNatLogicalPort), + getReRoutePolicy(egressPod.Status.PodIP, "6", "reroute-UUID", 
nodeLogicalRouterIPv6, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV6, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), + getEIPSNAT(podV6IP, egressPod.Namespace, egressPod.Name, egressIP.String(), expectedNatLogicalPort, DefaultNetworkControllerName), &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", @@ -4799,7 +4818,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: node2Name, Ports: []string{"k8s-" + node2Name + "-UUID"}, }, - egressIPServedPodsASv4, } if podZone == "remote" { expectedDatabaseState[2].(*nbdb.LogicalRouter).Policies = []string{} @@ -4854,7 +4872,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: node2Name, Ports: []string{"k8s-" + node2Name + "-UUID"}, }, - egressIPServedPodsASv4, } gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) return nil @@ -4885,6 +4902,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { } _, node1Subnet, _ := net.ParseCIDR(v6Node1Subnet) _, node2Subnet, _ := net.ParseCIDR(v6Node2Subnet) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil, types.DefaultNetworkName, DefaultNetworkControllerName) fakeOvn.startWithDBSetup( libovsdbtest.TestSetup{ NBData: []libovsdbtest.TestData{ @@ -4927,15 +4945,16 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: node2Name, Ports: []string{"k8s-" + node2Name + "-UUID"}, }, + egressIPServedPodsASv4, }, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, }, - &v1.NodeList{Items: []v1.Node{*node1, + &corev1.NodeList{Items: []corev1.Node{*node1, *newNodeGlobalZoneNotEgressableV6Only(node2Name, "0:0:0:0:0:fedf:c0a8:8e0c/64")}}, ) @@ -4979,11 +4998,9 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { gomega.Eventually(getEgressIPStatusLen(eIP.Name)).Should(gomega.Equal(1)) expectedNatLogicalPort := "k8s-node2" - - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil) - expectedDatabaseState := []libovsdbtest.TestData{ - getReRoutePolicy(egressPod.Status.PodIP, "6", "reroute-UUID", node2LogicalRouterIPv6, eipExternalID), + getReRoutePolicy(egressPod.Status.PodIP, "6", "reroute-UUID", node2LogicalRouterIPv6, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV6, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", @@ -4995,12 +5012,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Networks: []string{node2LogicalRouterIfAddrV6}, }, &nbdb.NAT{ - UUID: "egressip-nat-UUID", - LogicalIP: podV6IP, - ExternalIP: egressIP.String(), - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID", + LogicalIP: podV6IP, + ExternalIP: egressIP.String(), + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod.Namespace, egressPod.Name, IPFamilyValueV6, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort, Options: map[string]string{ @@ -5118,13 +5133,13 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { egressPod := *newPodWithLabels(eipNamespace, podName, 
node1Name, "", egressPodLabel) egressNamespace := newNamespaceWithLabels(eipNamespace, egressPodLabel) fakeOvn.startWithDBSetup(clusterRouterDbSetup, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, }, - &v1.NodeList{Items: []v1.Node{*newNodeGlobalZoneNotEgressableV6Only(node2Name, "0:0:0:0:0:feff:c0a8:8e0c/64"), + &corev1.NodeList{Items: []corev1.Node{*newNodeGlobalZoneNotEgressableV6Only(node2Name, "0:0:0:0:0:feff:c0a8:8e0c/64"), *newNodeGlobalZoneNotEgressableV6Only(node1Name, "0:0:0:0:0:fedf:c0a8:8e0c/64")}}, ) @@ -5315,14 +5330,14 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { node2Switch, }, }, - &v1.NodeList{ - Items: []v1.Node{node1, node2}, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, }) i, n, _ := net.ParseCIDR(podV4IP + "/23") @@ -5330,8 +5345,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { fakeOvn.controller.logicalPortCache.add(&egressPod, "", types.DefaultNetworkName, "", nil, []*net.IPNet{n}) if interconnect && node1Zone != node2Zone { fakeOvn.controller.zone = "local" + fakeOvn.controller.eIPC.zone = "local" } - err := fakeOvn.controller.WatchEgressIPNamespaces() gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.controller.WatchEgressIPPods() @@ -5357,7 +5372,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { EgressIP: egressIP2, }, } - err = fakeOvn.controller.patchReplaceEgressIPStatus(eIP.Name, status) + err = fakeOvn.controller.eIPC.patchReplaceEgressIPStatus(eIP.Name, status) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(getEgressIPStatusLen(eIP.Name)).Should(gomega.Equal(2)) @@ -5368,12 +5383,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { expectedNatLogicalPort1 := fmt.Sprintf("k8s-%s", assignmentNode1) expectedNatLogicalPort2 := fmt.Sprintf("k8s-%s", assignmentNode2) natEIP1 := &nbdb.NAT{ - UUID: "egressip-nat-1-UUID", - LogicalIP: podV4IP, - ExternalIP: assignedEgressIP1, - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-1-UUID", + LogicalIP: podV4IP, + ExternalIP: assignedEgressIP1, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort1, Options: map[string]string{ @@ -5381,12 +5394,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, } natEIP2 := &nbdb.NAT{ - UUID: "egressip-nat-2-UUID", - LogicalIP: podV4IP, - ExternalIP: assignedEgressIP2, - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-2-UUID", + LogicalIP: podV4IP, + ExternalIP: assignedEgressIP2, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort2, Options: map[string]string{ @@ -5394,23 +5405,26 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, } - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) 
- egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4, node2IPv4}) expectedDatabaseState := []libovsdbtest.TestData{ - getReRoutePolicy(egressPod.Status.PodIP, "4", "reroute-UUID", []string{"100.64.0.2", "100.64.0.3"}, eipExternalID), + getReRoutePolicy(egressPod.Status.PodIP, "4", "reroute-UUID", []string{"100.64.0.2", "100.64.0.3"}, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + assignmentNode1, @@ -5426,7 +5440,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"reroute-UUID", "default-no-reroute-UUID", "no-reroute-service-UUID", - "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouterPort{ UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node2.Name + "-UUID", @@ -5462,9 +5476,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalSwitchPort{ UUID: "k8s-" + node1Name + "-UUID", @@ -5486,10 +5501,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: 
types.ExternalSwitchPrefix + node2Name, Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "-UUID"}, }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), node1Switch, node2Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -5497,7 +5512,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { if !interconnect || node1Zone != "remote" { expectedDatabaseState[3].(*nbdb.LogicalRouter).Nat = []string{"egressip-nat-1-UUID"} expectedDatabaseState = append(expectedDatabaseState, natEIP1) - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID"} } if node1Zone == "local" { expectedDatabaseState = append(expectedDatabaseState, getReRouteStaticRoute(v4ClusterSubnet, nodeLogicalRouterIPv4[0])) @@ -5506,7 +5521,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { if !interconnect || node2Zone != "remote" { expectedDatabaseState[4].(*nbdb.LogicalRouter).Nat = []string{"egressip-nat-2-UUID"} expectedDatabaseState = append(expectedDatabaseState, natEIP2) - node2Switch.QOSRules = []string{"egressip-QoS-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID"} } if node2Zone == "local" { expectedDatabaseState = append(expectedDatabaseState, getReRouteStaticRoute(v4ClusterSubnet, node2LogicalRouterIPv4[0])) @@ -5519,7 +5534,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { } if node2Zone != node1Zone && node1Zone == "remote" { expectedDatabaseState[5].(*nbdb.LogicalRouter).Policies = []string{"default-no-reroute-UUID", "no-reroute-service-UUID", - "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"} + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"} expectedDatabaseState = expectedDatabaseState[1:] // policy is not visible since podNode is remote } gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) @@ -5543,7 +5558,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { EgressIP: egressIP2, }, } - err = fakeOvn.controller.patchReplaceEgressIPStatus(eIP.Name, status) + err = fakeOvn.controller.eIPC.patchReplaceEgressIPStatus(eIP.Name, status) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(func() []string { @@ -5558,18 +5573,21 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { expectedNatLogicalPort1 = fmt.Sprintf("k8s-%s", assignmentNode1) expectedNatLogicalPort2 = fmt.Sprintf("k8s-%s", assignmentNode2) expectedDatabaseState = []libovsdbtest.TestData{ - getReRoutePolicy(egressPod.Status.PodIP, "4", "reroute-UUID", []string{"100.64.0.2", "100.64.0.3"}, eipExternalID), + getReRoutePolicy(egressPod.Status.PodIP, "4", "reroute-UUID", []string{"100.64.0.2", "100.64.0.3"}, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, 
+ UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + assignmentNode1, @@ -5585,7 +5603,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"reroute-UUID", "default-no-reroute-UUID", "no-reroute-service-UUID", - "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouterPort{ UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node2.Name + "-UUID", @@ -5621,9 +5639,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalSwitchPort{ UUID: "k8s-" + node1Name + "-UUID", @@ -5645,10 +5664,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.ExternalSwitchPrefix + node2Name, Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "-UUID"}, }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), node1Switch, node2Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -5657,7 +5676,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { expectedDatabaseState[3].(*nbdb.LogicalRouter).Nat = []string{"egressip-nat-1-UUID"} natEIP1.ExternalIP = assignedEgressIP1 expectedDatabaseState = append(expectedDatabaseState, natEIP1) - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID"} } if node1Zone == "local" { expectedDatabaseState = append(expectedDatabaseState, getReRouteStaticRoute(v4ClusterSubnet, nodeLogicalRouterIPv4[0])) @@ -5667,7 +5686,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { expectedDatabaseState[4].(*nbdb.LogicalRouter).Nat = []string{"egressip-nat-2-UUID"} natEIP2.ExternalIP = 
assignedEgressIP2 expectedDatabaseState = append(expectedDatabaseState, natEIP2) - node2Switch.QOSRules = []string{"egressip-QoS-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID"} } if node2Zone == "local" { expectedDatabaseState = append(expectedDatabaseState, getReRouteStaticRoute(v4ClusterSubnet, node2LogicalRouterIPv4[0])) @@ -5680,7 +5699,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { } if node2Zone != node1Zone && node1Zone == "remote" { expectedDatabaseState[5].(*nbdb.LogicalRouter).Policies = []string{"default-no-reroute-UUID", "no-reroute-service-UUID", - "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"} + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"} expectedDatabaseState = expectedDatabaseState[1:] // policy is not visible since podNode is remote } gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) @@ -5710,6 +5729,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { egressNamespace := newNamespaceWithLabels(eipNamespace, egressPodLabel) _, node1Subnet, _ := net.ParseCIDR(v6Node1Subnet) _, node2Subnet, _ := net.ParseCIDR(v6Node2Subnet) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil, types.DefaultNetworkName, DefaultNetworkControllerName) fakeOvn.startWithDBSetup( libovsdbtest.TestSetup{ @@ -5753,15 +5773,16 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: node2Name, Ports: []string{"k8s-" + node2Name + "-UUID"}, }, + egressIPServedPodsASv4, }, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, }, - &v1.NodeList{Items: []v1.Node{*newNodeGlobalZoneNotEgressableV6Only(node2Name, "0:0:0:0:0:feff:c0a8:8e0c/64"), + &corev1.NodeList{Items: []corev1.Node{*newNodeGlobalZoneNotEgressableV6Only(node2Name, "0:0:0:0:0:feff:c0a8:8e0c/64"), *newNodeGlobalZoneNotEgressableV6Only(node1Name, "0:0:0:0:0:fedf:c0a8:8e0c/64")}}, ) @@ -5804,29 +5825,25 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { expectedNatLogicalPort := "k8s-node2" expectedNAT := &nbdb.NAT{ - UUID: "egressip-nat-UUID", - LogicalIP: podV6IP, - ExternalIP: egressIP.String(), - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID", + LogicalIP: podV6IP, + ExternalIP: egressIP.String(), + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod.Namespace, egressPod.Name, IPFamilyValueV6, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort, Options: map[string]string{ "stateless": "false", }, } - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil) + expectedDatabaseState := []libovsdbtest.TestData{ &nbdb.LogicalRouterPolicy{ - Priority: types.EgressIPReroutePriority, - Match: fmt.Sprintf("ip6.src == %s", egressPod.Status.PodIP), - Action: nbdb.LogicalRouterPolicyActionReroute, - Nexthops: node2LogicalRouterIPv6, - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, - UUID: "reroute-UUID", + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip6.src == %s", egressPod.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: node2LogicalRouterIPv6, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV6, types.DefaultNetworkName, 
fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID", }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, @@ -5993,7 +6010,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, } fakeOvn.startWithDBSetup(libovsdbtest.TestSetup{}) - err := fakeOvn.controller.reconcileEgressIP(&eIP, &eIP) + err := fakeOvn.controller.eIPC.reconcileEgressIP(&eIP, &eIP) gomega.Expect(err).To(gomega.HaveOccurred()) return nil } @@ -6114,11 +6131,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Nodes().Create(context.TODO(), &node2, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - node1Switch.QOSRules = []string{"egressip-QoS-UUID", "egressip-QoSv6-UUID"} - node2Switch.QOSRules = []string{"egressip-QoS-UUID", "egressip-QoSv6-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID", "default-QoSv6-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID", "default-QoSv6-UUID"} - egressSVCServedPodsASv4, egressSVCServedPodsASv6 := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, egressIPServedPodsASv6 := buildEgressIPServedPodsAddressSets(nil) + egressSVCServedPodsASv4, egressSVCServedPodsASv6 := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, egressIPServedPodsASv6 := buildEgressIPServedPodsAddressSets(nil, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) egressNodeIPsASv4, egressNodeIPsASv6 := buildEgressIPNodeAddressSets([]string{node1IPv4, node1IPv6, node2IPv4}) expectedDatabaseState := []libovsdbtest.TestData{ @@ -6126,36 +6143,40 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip6.src == $%s || ip6.src == $%s) && ip6.dst == $%s", egressIPServedPodsASv6.Name, egressSVCServedPodsASv6.Name, egressNodeIPsASv6.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-v6-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-v6-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV6, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", - "default-v6-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + "default-v6-no-reroute-node-UUID", 
"default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -6207,8 +6228,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, node1Switch, node2Switch, - getDefaultQoSRule(false), - getDefaultQoSRule(true), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), + getDefaultQoSRule(true, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressSVCServedPodsASv6, egressIPServedPodsASv4, egressIPServedPodsASv6, egressNodeIPsASv4, egressNodeIPsASv6, @@ -6320,16 +6341,18 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Policies: []string{"reroute-UUID", "no-reroute-service-UUID"}, }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node.Name, @@ -6464,14 +6487,14 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1}, + &corev1.NodeList{ + Items: []corev1.Node{node1}, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, 
}, - &v1.PodList{ - Items: []v1.Pod{egressPod1}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod1}, }, ) fakeOvn.controller.lsManager.AddOrUpdateSwitch(node1.Name, []*net.IPNet{ovntest.MustParseIPNet(v4Node1Subnet)}) @@ -6495,12 +6518,12 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { expectedNatLogicalPort1 := "k8s-node1" - nodeSwitch.QOSRules = []string{"egressip-QoS-UUID"} + nodeSwitch.QOSRules = []string{"default-QoS-UUID"} namespaceAddressSetv4, _ := buildNamespaceAddressSets(eipNamespace, []string{egressPodIP.String()}) - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{egressPodIP.String()}) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{egressPodIP.String()}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4}) expectedDatabaseStatewithPod := []libovsdbtest.TestData{ @@ -6508,38 +6531,39 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.EgressIPReroutePriority, - Match: fmt.Sprintf("ip4.src == %s", egressPodIP), - Action: nbdb.LogicalRouterPolicyActionReroute, - Nexthops: nodeLogicalRouterIPv4, - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, - UUID: "reroute-UUID1", + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", 
egressPodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: nodeLogicalRouterIPv4, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID1", }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "reroute-UUID1", - "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -6548,12 +6572,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Nat: []string{"egressip-nat-UUID1"}, }, &nbdb.NAT{ - UUID: "egressip-nat-UUID1", - LogicalIP: egressPodIP.String(), - ExternalIP: egressIP1, - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID1", + LogicalIP: egressPodIP.String(), + ExternalIP: egressIP1, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort1, Options: map[string]string{ @@ -6586,7 +6608,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.ExternalSwitchPrefix + node1Name, Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, }, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -6626,27 +6648,30 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 
10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", - Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -6680,7 +6705,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.ExternalSwitchPrefix + node1Name, Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, }, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -6690,8 +6715,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { // recreate pod with same name immediately; simulating handler race (pods v/s egressip) condition, // so instead of proper pod create, we try out egressIP pod setup which will be a no-op since pod doesn't exist ginkgo.By("should not add egress IP setup for a deleted pod whose entry exists in logicalPortCache") - err = fakeOvn.controller.addPodEgressIPAssignments(egressIPName, eIP.Status.Items, &egressPod1) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.eIPC.addPodEgressIPAssignments(fakeOvn.controller, egressIPName, eIP.Status.Items, util.EgressIPMark{}, &egressPod1) + gomega.Expect(err).To(gomega.HaveOccurred()) // pod is gone but logicalPortCache holds the entry for 60 seconds egressPodPortInfo, err = fakeOvn.controller.logicalPortCache.get(&egressPod1, types.DefaultNetworkName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -6807,14 +6832,14 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1}, + &corev1.NodeList{ + Items: []corev1.Node{node1}, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{*egressPod1}, + &corev1.PodList{ + Items: []corev1.Pod{*egressPod1}, }, ) fakeOvn.controller.lsManager.AddOrUpdateSwitch(node1.Name, []*net.IPNet{ovntest.MustParseIPNet(v4Node1Subnet)}) @@ -6834,12 +6859,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { expectedNatLogicalPort1 := "k8s-node1" podEIPSNAT := &nbdb.NAT{ - UUID: "egressip-nat-UUID1", - LogicalIP: egressPodIP.String(), - ExternalIP: egressIP1, - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID1", + LogicalIP: egressPodIP.String(), + ExternalIP: egressIP1, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort1, Options: map[string]string{ @@ -6847,14 +6870,12 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, } podReRoutePolicy := &nbdb.LogicalRouterPolicy{ - Priority: types.EgressIPReroutePriority,
- Match: fmt.Sprintf("ip4.src == %s", oldEgressPodIP), - Action: nbdb.LogicalRouterPolicyActionReroute, - Nexthops: nodeLogicalRouterIPv4, - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, - UUID: "reroute-UUID1", + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", oldEgressPodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: nodeLogicalRouterIPv4, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID1", } node1GR := &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -6862,10 +6883,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Ports: []string{types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID"}, Nat: []string{"egressip-nat-UUID1"}, } - nodeSwitch.QOSRules = []string{"egressip-QoS-UUID"} + nodeSwitch.QOSRules = []string{"default-QoS-UUID"} - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{oldEgressPodIP}) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{oldEgressPodIP}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4}) namespaceAddressSetv4, _ := buildNamespaceAddressSets(eipNamespace, []string{egressPodIP.String()}) @@ -6875,29 +6896,32 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: 
getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, podReRoutePolicy, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "reroute-UUID1", - "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, node1GR, &nbdb.LogicalSwitchPort{ @@ -6926,11 +6950,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.ExternalSwitchPrefix + node1Name, Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, }, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, - egressIPServedPodsASv4, egressNodeIPsASv4, namespaceAddressSetv4, + egressIPServedPodsASv4, } podLSP := &nbdb.LogicalSwitchPort{ UUID: util.GetLogicalPortName(egressPod1.Namespace, egressPod1.Name) + "-UUID", @@ -6975,38 +6999,15 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { // Delete the pod to trigger the cleanup failure err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(egressPod1.Namespace).Delete(context.TODO(), egressPod1.Name, metav1.DeleteOptions{}) - - // internally we have an error: - // E1006 12:51:59.594899 2500972 obj_retry.go:1517] Failed to delete *factory.egressIPPod egressip-namespace/egress-pod, error: pod egressip-namespace/egress-pod: no pod IPs found gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // expect pod's port to have been gone, while egressIPPod still being present - gomega.Eventually(func() error { - _, err := libovsdbops.GetLogicalSwitchPort(fakeOvn.controller.nbClient, &nbdb.LogicalSwitchPort{Name: podLSP.Name}) - if err != nil { - return err - } - return nil - - }, 5).Should(gomega.Equal(libovsdbclient.ErrNotFound)) - - // egressIP cache is stale in the sense the podKey has not been deleted since deletion failed - pas := getPodAssignmentState(egressPod1) - gomega.Expect(pas).NotTo(gomega.BeNil()) - gomega.Expect(pas.egressStatuses.statusMap).To(gomega.Equal(statusMap{ - { - Node: "node1", - EgressIP: "192.168.126.101", - }: "", - })) // recreate pod with same name immediately; ginkgo.By("should add egress IP setup for the NEW pod which exists in logicalPortCache") newEgressPodIP := "10.128.0.60" egressPod1 = newPodWithLabels(eipNamespace, podName, node1Name, newEgressPodIP, egressPodLabel) egressPod1.Annotations = map[string]string{"k8s.ovn.org/pod-networks": `{"default":{"ip_addresses":["10.128.0.60/24"],"mac_address":"0a:58:0a:80:00:06","gateway_ips":["10.128.0.1"],"routes":[{"dest":"10.128.0.0/24","nextHop":"10.128.0.1"}],"ip_address":"10.128.0.60/24","gateway_ip":"10.128.0.1"}}`} - _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(egressPod1.Namespace).Create(context.TODO(), egressPod1, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - + gomega.Eventually(func() error { + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(egressPod1.Namespace).Create(context.TODO(), egressPod1, metav1.CreateOptions{}) + return err + }, "5s", "1s").ShouldNot(gomega.HaveOccurred()) // wait for the logical port cache to get updated with the new pod's IP var newEgressPodPortInfo *lpInfo getEgressPodIP := func() string {
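
The hunk above swaps a one-shot pod Create for a polled create: Gomega's Eventually re-runs the function until it stops returning an error, here with a 5s timeout and a 1s poll interval, so the test tolerates the old pod object still being torn down. A minimal, self-contained sketch of that idiom against a fake clientset; the package, test name, and object names are invented for illustration and are not part of this change:

```go
// Sketch only (not part of the patch): the Eventually-wrapped create used in
// the hunk above.
package example

import (
	"context"
	"testing"

	"github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestRetriedPodCreate(t *testing.T) {
	g := gomega.NewWithT(t)
	client := fake.NewSimpleClientset()
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:      "egress-pod",
		Namespace: "egressip-namespace",
	}}
	// Keep retrying the create until it returns nil or the 5s timeout
	// elapses, polling every 1s; against the fake clientset the first
	// attempt succeeds, but in the real test this absorbs transient errors.
	g.Eventually(func() error {
		_, err := client.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
		return err
	}, "5s", "1s").ShouldNot(gomega.HaveOccurred())
}
```
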
@@ -7020,50 +7021,22 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { return getEgressPodIP()
}).Should(gomega.Equal(newEgressPodIP)) gomega.Expect(newEgressPodPortInfo.expires.IsZero()).To(gomega.BeTrue()) - - // deletion for the older EIP pod object is still being retried so we still have SNAT - // towards nodeIP for new pod which is created by addLogicalPort. - // Note that we while have the stale re-route policy for old pod, the snat for the old pod towards egressIP is gone - // because deleteLogicalPort removes ALL snats for a given pod but doesn't remove the policies. - ipv4Addr, _, _ := net.ParseCIDR(node1IPv4CIDR) - podNodeSNAT := &nbdb.NAT{ - UUID: "node-nat-UUID1", - LogicalIP: newEgressPodIP, - ExternalIP: ipv4Addr.String(), - Type: nbdb.NATTypeSNAT, - Options: map[string]string{ - "stateless": "false", - }, - } - finalDatabaseStatewithPod = append(finalDatabaseStatewithPod, podNodeSNAT) - node1GR.Nat = []string{podNodeSNAT.UUID} - podAddr = fmt.Sprintf("%s %s", newEgressPodPortInfo.mac.String(), newEgressPodIP) - podLSP.PortSecurity = []string{podAddr} - podLSP.Addresses = []string{podAddr} - namespaceAddressSetv4.Addresses = []string{newEgressPodIP} - gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(finalDatabaseStatewithPod[1:])) - - ginkgo.By("trigger a forced retry and ensure deletion of oldPod and creation of newPod are successful") - // let us add back the annotation to the oldPod which is being retried to make deletion a success - podKey, err := retry.GetResourceKey(egressPod1) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - retry.CheckRetryObjectEventually(podKey, true, fakeOvn.controller.retryEgressIPPods) - retryOldObj := retry.GetOldObjFromRetryObj(podKey, fakeOvn.controller.retryEgressIPPods) - //fakeOvn.controller.retryEgressIPPods.retryEntries.LoadOrStore(podKey, &RetryObjEntry{backoffSec: 1}) - pod, _ := retryOldObj.(*v1.Pod) - pod.Annotations = oldAnnotation - fakeOvn.controller.retryEgressIPPods.RequestRetryObjs() - // there should also be no entry for this pod in the retry cache - gomega.Eventually(func() bool { - return retry.CheckRetryObj(podKey, fakeOvn.controller.retryEgressIPPods) - }, retry.RetryObjInterval+time.Second).Should(gomega.BeFalse()) - // ensure that egressIP setup is being done with the new pod's information from logicalPortCache podReRoutePolicy.Match = fmt.Sprintf("ip4.src == %s", newEgressPodIP) podEIPSNAT.LogicalIP = newEgressPodIP node1GR.Nat = []string{podEIPSNAT.UUID} egressIPServedPodsASv4.Addresses = []string{"10.128.0.60"} - gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(finalDatabaseStatewithPod[:len(finalDatabaseStatewithPod)-1])) + + podPortInfo, err := fakeOvn.controller.logicalPortCache.get(egressPod1, types.DefaultNetworkName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + egressPodIP, _, err = net.ParseCIDR(podPortInfo.ips[0].String()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(podPortInfo.expires.IsZero()).To(gomega.BeTrue()) + podAddr = fmt.Sprintf("%s %s", podPortInfo.mac.String(), egressPodIP) + podLSP.Addresses = []string{podAddr} + podLSP.PortSecurity = []string{podAddr} + namespaceAddressSetv4.Addresses = []string{egressPodIP.String()} + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(finalDatabaseStatewithPod)) return nil } @@ -7230,16 +7203,26 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP1, eIP2}, }, - &v1.NodeList{ - Items: []v1.Node{node1, node2}, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, }, - &v1.NamespaceList{ - Items:
[]v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod1}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod1}, }, ) + isNode1Local := true + if node1Zone == "remote" { + isNode1Local = false + } + isNode2Local := true + if node2Zone == "remote" { + isNode2Local = false + } + fakeOvn.controller.localZoneNodes.Store(node1.Name, isNode1Local) + fakeOvn.controller.localZoneNodes.Store(node2.Name, isNode2Local) fakeOvn.controller.lsManager.AddOrUpdateSwitch(node1.Name, []*net.IPNet{ovntest.MustParseIPNet(v4Node1Subnet)}) fakeOvn.controller.lsManager.AddOrUpdateSwitch(node2.Name, []*net.IPNet{ovntest.MustParseIPNet(v4Node2Subnet)}) err := fakeOvn.controller.WatchPods() @@ -7264,7 +7247,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { EgressIP: egressIP3, }, } - err = fakeOvn.controller.patchReplaceEgressIPStatus(egressIP2Name, status) + err = fakeOvn.controller.eIPC.patchReplaceEgressIPStatus(egressIP2Name, status) gomega.Expect(err).NotTo(gomega.HaveOccurred()) egressPodPortInfo, err := fakeOvn.controller.logicalPortCache.get(&egressPod1, types.DefaultNetworkName) @@ -7306,12 +7289,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { g.Expect(pas.standbyEgressIPNames.Has(egressIP2Name)).To(gomega.BeTrue()) }).Should(gomega.Succeed()) podEIPSNAT := &nbdb.NAT{ - UUID: "egressip-nat-UUID1", - LogicalIP: egressPodIP[0].String(), - ExternalIP: assignedEIP, - ExternalIDs: map[string]string{ - "name": pas.egressIPName, - }, + UUID: "egressip-nat-UUID1", + LogicalIP: egressPodIP[0].String(), + ExternalIP: assignedEIP, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, eipNamespace, podName, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: utilpointer.String("k8s-node1"), Options: map[string]string{ @@ -7319,19 +7300,17 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, } podReRoutePolicy := &nbdb.LogicalRouterPolicy{ - Priority: types.EgressIPReroutePriority, - Match: fmt.Sprintf("ip4.src == %s", egressPodIP[0].String()), - Action: nbdb.LogicalRouterPolicyActionReroute, - Nexthops: nodeLogicalRouterIPv4, - ExternalIDs: map[string]string{ - "name": pas.egressIPName, - }, - UUID: "reroute-UUID1", + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPodIP[0].String()), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: nodeLogicalRouterIPv4, + ExternalIDs: getEgressIPLRPReRouteDbIDs(pas.egressIPName, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID1", } node1GR.Nat = []string{"egressip-nat-UUID1"} - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{egressPodIP[0].String()}) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{egressPodIP[0].String()}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4, node2IPv4}) namespaceAddressSetv4, _ := buildNamespaceAddressSets(eipNamespace, []string{egressPodIP[0].String()})
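
The hunk above shows the patch's central change to test expectations: NAT and logical-router-policy ExternalIDs are no longer hand-written {"name": ...} maps but are derived from typed db-object-ID helpers (getEgressIPNATDbIDs, getEgressIPLRPReRouteDbIDs, and friends), keyed by EgressIP name, pod namespace/name, IP family, and owning controller. A rough sketch of that idea follows; the struct, field, and key names are assumptions for illustration, not the actual helpers:

```go
// Sketch only — the real helpers live in the ovn-kubernetes EgressIP code;
// everything named here is hypothetical.
package example

import "fmt"

// egressIPNATIDs mirrors the arguments the tests pass to getEgressIPNATDbIDs:
// EgressIP object name, pod namespace/name, IP family, and controller name.
type egressIPNATIDs struct {
	Name, PodNamespace, PodName, IPFamily, Controller string
}

// GetExternalIDs flattens the typed owner fields into the map stored on the
// NB object, so lookups and cleanup can match on stable structured keys
// instead of a single free-form "name" entry. Key strings are hypothetical.
func (ids egressIPNATIDs) GetExternalIDs() map[string]string {
	return map[string]string{
		"k8s.ovn.org/owner-controller": ids.Controller,
		"k8s.ovn.org/owner-type":       "EgressIP",
		"k8s.ovn.org/name": fmt.Sprintf("%s/%s/%s/%s",
			ids.Name, ids.PodNamespace, ids.PodName, ids.IPFamily),
	}
}
```
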
@@ -7340,22 +7319,24 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { podEIPSNAT, podReRoutePolicy, &nbdb.LogicalRouterPolicy{ - Priority:
types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "reroute-UUID1", - "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, node1GR, node2GR, node1LSP, node2LSP, @@ -7375,9 +7356,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalSwitch{ UUID: types.ExternalSwitchPrefix + node1Name + "-UUID", @@ -7389,8 +7371,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.ExternalSwitchPrefix + node2Name, Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "-UUID"}, }, - getNoReRouteReplyTrafficPolicy(), - getDefaultQoSRule(false), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -7411,13 +7393,13 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { PortSecurity: []string{podAddr}, } node1Switch.Ports = []string{podLSP.UUID} - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} - node2Switch.QOSRules = []string{"egressip-QoS-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID"} finalDatabaseStatewithPod := append(expectedDatabaseStatewithPod, podLSP) if node1Zone == "remote" { // policy is not visible since podNode is in remote zone finalDatabaseStatewithPod[4].(*nbdb.LogicalRouter).Policies = []string{"no-reroute-UUID",
"no-reroute-service-UUID", - "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"} + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"} finalDatabaseStatewithPod = finalDatabaseStatewithPod[2:] podEIPSNAT.ExternalIP = "192.168.126.12" // EIP SNAT is not visible since podNode is remote, SNAT towards nodeIP is visible. podEIPSNAT.LogicalPort = nil @@ -7460,7 +7442,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { EgressIP: egressIP2, }, } - err = fakeOvn.controller.patchReplaceEgressIPStatus(egressIPName, status) + err = fakeOvn.controller.eIPC.patchReplaceEgressIPStatus(egressIPName, status) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // ensure secondIP from first object gets assigned to node2 @@ -7470,12 +7452,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { gomega.Expect(egressIPs1[1]).To(gomega.Equal(egressIP2)) podEIPSNAT2 := &nbdb.NAT{ - UUID: "egressip-nat-UUID2", - LogicalIP: egressPodIP[0].String(), - ExternalIP: egressIPs1[1], - ExternalIDs: map[string]string{ - "name": pas.egressIPName, - }, + UUID: "egressip-nat-UUID2", + LogicalIP: egressPodIP[0].String(), + ExternalIP: egressIPs1[1], + ExternalIDs: getEgressIPNATDbIDs(pas.egressIPName, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: utilpointer.String("k8s-node2"), Options: map[string]string{ @@ -7514,9 +7494,9 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { // replicates controller startup state fakeOvn.controller.eIPC.podAssignmentMutex.Unlock() - egressIPCache, err := fakeOvn.controller.generateCacheForEgressIP() + egressIPCache, err := fakeOvn.controller.eIPC.generateCacheForEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = fakeOvn.controller.syncPodAssignmentCache(egressIPCache) + err = fakeOvn.controller.eIPC.syncPodAssignmentCache(egressIPCache) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pas = getPodAssignmentState(&egressPod1) @@ -7558,7 +7538,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { EgressIP: egressIP3, }, } - err = fakeOvn.controller.patchReplaceEgressIPStatus(egressIP2Name, status) + err = fakeOvn.controller.eIPC.patchReplaceEgressIPStatus(egressIP2Name, status) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(func(g gomega.Gomega) { @@ -7655,12 +7635,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { finalDatabaseStatewithPod = expectedDatabaseStatewithPod finalDatabaseStatewithPod = append(expectedDatabaseStatewithPod, podLSP) podEIPSNAT.ExternalIP = egressIP3 - podEIPSNAT.ExternalIDs = map[string]string{ - "name": egressIP2Name, - } - podReRoutePolicy.ExternalIDs = map[string]string{ - "name": egressIP2Name, - } + podEIPSNAT.ExternalIDs = getEgressIPNATDbIDs(egressIP2Name, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs() + podReRoutePolicy.ExternalIDs = getEgressIPLRPReRouteDbIDs(egressIP2Name, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs() if assginedNodeForEIPObj2 == node2.Name { podEIPSNAT.LogicalPort = utilpointer.String("k8s-node2") finalDatabaseStatewithPod = append(finalDatabaseStatewithPod, podNodeSNAT) @@ -7675,7 +7651,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { if node1Zone == "remote" { // policy is not visible since podNode is in remote zone 
finalDatabaseStatewithPod[4].(*nbdb.LogicalRouter).Policies = []string{"no-reroute-UUID", "no-reroute-service-UUID", - "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"} + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"} finalDatabaseStatewithPod = finalDatabaseStatewithPod[2:] podEIPSNAT.ExternalIP = "192.168.126.12" // EIP SNAT is not visible since podNode is remote, SNAT towards nodeIP is visible. podEIPSNAT.LogicalPort = nil @@ -7698,9 +7674,9 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { fakeOvn.controller.eIPC.podAssignment = make(map[string]*podAssignmentState) // replicates controller startup state fakeOvn.controller.eIPC.podAssignmentMutex.Unlock() - egressIPCache, err = fakeOvn.controller.generateCacheForEgressIP() + egressIPCache, err = fakeOvn.controller.eIPC.generateCacheForEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = fakeOvn.controller.syncPodAssignmentCache(egressIPCache) + err = fakeOvn.controller.eIPC.syncPodAssignmentCache(egressIPCache) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // we don't have any egressIPs, so cache is nil @@ -7736,7 +7712,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { nodeIPv6 := "::feff:c0a8:8e0c" nodeIPv6CIDR := nodeIPv6 + "/64" - node := v1.Node{ + node := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: node1Name, Annotations: map[string]string{ @@ -7745,11 +7721,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\",\"%s\"]", nodeIPv4CIDR, nodeIPv6CIDR), }, }, - Status: v1.NodeStatus{ - Conditions: []v1.NodeCondition{ + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ { - Type: v1.NodeReady, - Status: v1.ConditionTrue, + Type: corev1.NodeReady, + Status: corev1.ConditionTrue, }, }, }, @@ -7800,8 +7776,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node}, + &corev1.NodeList{ + Items: []corev1.Node{node}, }) err := fakeOvn.controller.WatchEgressIPNamespaces() @@ -7813,10 +7789,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { err = fakeOvn.controller.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - node1Switch.QOSRules = []string{"egressip-QoS-UUID", "egressip-QoSv6-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID", "default-QoSv6-UUID"} - egressSVCServedPodsASv4, egressSVCServedPodsASv6 := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, egressIPServedPodsASv6 := buildEgressIPServedPodsAddressSets(nil) + egressSVCServedPodsASv4, egressSVCServedPodsASv6 := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, egressIPServedPodsASv6 := buildEgressIPServedPodsAddressSets(nil, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) egressNodeIPsASv4, egressNodeIPsASv6 := buildEgressIPNodeAddressSets([]string{nodeIPv4, nodeIPv6}) expectedDatabaseState := []libovsdbtest.TestData{ @@ -7824,36 +7800,40 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: 
nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip6.src == $%s || ip6.src == $%s) && ip6.dst == $%s", egressIPServedPodsASv6.Name, egressSVCServedPodsASv6.Name, egressNodeIPsASv6.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-v6-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-v6-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV6, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", - "default-v6-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + "default-v6-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node.Name, @@ -7875,8 +7855,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node.Name + "UUID"}, }, node1Switch, - getDefaultQoSRule(false), - getDefaultQoSRule(true), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), + getDefaultQoSRule(true, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressSVCServedPodsASv6, egressIPServedPodsASv4, egressIPServedPodsASv6, egressNodeIPsASv4, egressNodeIPsASv6, @@ -7905,36 +7885,40 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", 
egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip6.src == $%s || ip6.src == $%s) && ip6.dst == $%s", egressIPServedPodsASv6.Name, egressSVCServedPodsASv6.Name, egressNodeIPsASv6.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-v6-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-v6-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV6, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", - "default-v6-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + "default-v6-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node.Name, @@ -7956,8 +7940,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node.Name + "UUID"}, }, node1Switch, - getDefaultQoSRule(false), - getDefaultQoSRule(true), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), + getDefaultQoSRule(true, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressSVCServedPodsASv6, egressIPServedPodsASv4, egressIPServedPodsASv6, 
egressNodeIPsASv4, egressNodeIPsASv6, @@ -8064,8 +8048,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1, node2}, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, }) err := fakeOvn.controller.WatchEgressIPNamespaces() @@ -8077,11 +8061,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { err = fakeOvn.controller.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} - node2Switch.QOSRules = []string{"egressip-QoS-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID"} - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4, node2IPv4}) expectedDatabaseState := []libovsdbtest.TestData{ @@ -8089,27 +8073,30 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", - Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + 
Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -8151,7 +8138,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, node1Switch, node2Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -8260,8 +8247,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1, node2}, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, }) err := fakeOvn.controller.WatchEgressIPNamespaces() @@ -8273,11 +8260,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { err = fakeOvn.controller.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} - node2Switch.QOSRules = []string{"egressip-QoS-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID"} - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4, node2IPv4}) expectedDatabaseState := []libovsdbtest.TestData{ @@ -8285,27 +8272,30 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), 
+ Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", - Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -8347,7 +8337,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, node1Switch, node2Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -8366,28 +8356,31 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", - "egressip-no-reroute-reply-traffic"}, + "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -8429,7 +8422,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, node1Switch, node2Switch, - getDefaultQoSRule(false), + 
getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressIPServedPodsASv4, egressSVCServedPodsASv4, egressNodeIPsASv4, @@ -8565,14 +8558,14 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1, node2}, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, }, ) @@ -8595,11 +8588,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { gomega.Eventually(getEgressIPReassignmentCount).Should(gomega.Equal(0)) expectedNatLogicalPort := "k8s-node2" - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} - node2Switch.QOSRules = []string{"egressip-QoS-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID"} - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4, node2IPv4}) expectedDatabaseState := []libovsdbtest.TestData{ @@ -8607,40 +8600,39 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + 
ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.EgressIPReroutePriority, - Match: fmt.Sprintf("ip4.src == %s", egressPod.Status.PodIP), - Action: nbdb.LogicalRouterPolicyActionReroute, - Nexthops: node2LogicalRouterIPv4, - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, - UUID: "reroute-UUID", + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: node2LogicalRouterIPv4, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID", }, &nbdb.NAT{ - UUID: "egressip-nat-UUID", - LogicalIP: podV4IP, - ExternalIP: egressIP1, - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID", + LogicalIP: podV4IP, + ExternalIP: egressIP1, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort, Options: map[string]string{ @@ -8651,7 +8643,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"reroute-UUID", "default-no-reroute-UUID", "no-reroute-service-UUID", - "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -8706,7 +8698,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, node1Switch, node2Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -8768,19 +8760,18 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { libovsdbtest.TestSetup{ NBData: []libovsdbtest.TestData{ &nbdb.LogicalRouterPolicy{ - UUID: "keep-me-UUID", - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Priority: types.DefaultNoRereoutePriority, - Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "keep-me-UUID", + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Priority: types.DefaultNoRereoutePriority, + Action: nbdb.LogicalRouterPolicyActionAllow, + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - UUID: "remove-me-UUID", - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, - Match: "ip.src == 10.128.3.8", - Priority: types.EgressIPReroutePriority, - Action: nbdb.LogicalRouterPolicyActionReroute, + UUID: "remove-me-UUID", + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressNamespace.Name, "doesnt-exist-pod", IPFamilyValueV4, types.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + Match: "ip.src == 10.128.3.8", + Priority: types.EgressIPReroutePriority, + Action: nbdb.LogicalRouterPolicyActionReroute, }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, @@ -8793,12 +8784,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Nat: []string{"egressip-nat-UUID"}, }, &nbdb.NAT{ - 
UUID: "egressip-nat-UUID", - LogicalIP: podV4IP, - ExternalIP: egressIP1, - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID", + LogicalIP: podV4IP, + ExternalIP: egressIP1, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, eipNamespace, podName, IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort, Options: map[string]string{ @@ -8823,19 +8812,19 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &nbdb.LogicalSwitch{ UUID: node1Name + "-UUID", Name: node1Name, - QOSRules: []string{"egressip-QoS-UUID"}, + QOSRules: []string{"default-QoS-UUID"}, }, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, DefaultNetworkControllerName), }, }, &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1}, + &corev1.NodeList{ + Items: []corev1.Node{node1}, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, ) @@ -8850,8 +8839,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(1)) gomega.Eventually(getEgressIPReassignmentCount).Should(gomega.Equal(0)) - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4}) expectedDatabaseState := []libovsdbtest.TestData{ @@ -8859,28 +8848,31 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - UUID: "keep-me-UUID", - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Priority: types.DefaultNoRereoutePriority, - Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "keep-me-UUID", + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Priority: types.DefaultNoRereoutePriority, + Action: nbdb.LogicalRouterPolicyActionAllow, + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: 
types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"keep-me-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", - "egressip-no-reroute-reply-traffic"}, + "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -8904,9 +8896,9 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &nbdb.LogicalSwitch{ UUID: node1Name + "-UUID", Name: node1Name, - QOSRules: []string{"egressip-QoS-UUID"}, + QOSRules: []string{"default-QoS-UUID"}, }, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -8997,12 +8989,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { // This is unexpected snat entry where its logical port refers to an unavailable node // and ensure this entry is removed as soon as ovnk master is up and running. &nbdb.NAT{ - UUID: "egressip-nat-UUID2", - LogicalIP: podV4IP, - ExternalIP: egressIP, - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID2", + LogicalIP: podV4IP, + ExternalIP: egressIP, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, eipNamespace, podName, IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: utilpointer.String("k8s-node2"), Options: map[string]string{ @@ -9019,14 +9009,14 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1}, + &corev1.NodeList{ + Items: []corev1.Node{node1}, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, }, ) @@ -9063,40 +9053,44 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { gomega.Expect(nodes[0]).To(gomega.Equal(node1.Name)) gomega.Expect(egressIPs[0]).To(gomega.Equal(egressIP)) - podEIPSNAT := getEIPSNAT(podV4IP, egressIP, "k8s-node1") - podReRoutePolicy := getReRoutePolicy(egressPodIP[0].String(), "4", "reroute-UUID", nodeLogicalRouterIPv4, eipExternalID) + podEIPSNAT := getEIPSNAT(podV4IP, egressPod.Namespace, egressPod.Name, egressIP, "k8s-node1", DefaultNetworkControllerName) + podReRoutePolicy := getReRoutePolicy(egressPodIP[0].String(), "4", "reroute-UUID", nodeLogicalRouterIPv4, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()) node1GR.Nat = []string{"egressip-nat-UUID"} - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID"} - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := 
buildEgressIPServedPodsAddressSets([]string{podV4IP}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4}) expectedDatabaseStatewithPod := []libovsdbtest.TestData{ &nbdb.LogicalRouterPolicy{ Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), podEIPSNAT, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, podReRoutePolicy, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "reroute-UUID", "default-no-reroute-node-UUID", - "egressip-no-reroute-reply-traffic"}, + "default-no-reroute-reply-traffic"}, }, node1GR, node1LSP, &nbdb.LogicalRouterPort{ UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID", @@ -9108,7 +9102,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.ExternalSwitchPrefix + node1Name, Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, }, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -9217,8 +9211,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1, node2}, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, }) err := fakeOvn.controller.WatchEgressIPNamespaces() @@ -9230,38 +9224,41 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() 
{ err = fakeOvn.controller.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4, node2IPv4}) - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} - node2Switch.QOSRules = []string{"egressip-QoS-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID"} expectedDatabaseState := []libovsdbtest.TestData{ &nbdb.LogicalRouterPolicy{ Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", - Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -9302,7 +9299,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "-UUID"}, }, node1Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, 
fakeOvn.controller.eIPC.controllerName), node2Switch, egressSVCServedPodsASv4, egressIPServedPodsASv4, @@ -9322,27 +9319,30 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", - Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -9383,7 +9383,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "-UUID"}, }, node1Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), node2Switch, egressSVCServedPodsASv4, egressIPServedPodsASv4, @@ -9413,27 +9413,30 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + 
Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", - Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -9474,7 +9477,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "-UUID"}, }, node1Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), node2Switch, egressSVCServedPodsASv4, egressIPServedPodsASv4, @@ -9583,8 +9586,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1, node2}, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, }) err := fakeOvn.controller.WatchEgressIPNamespaces() @@ -9596,11 +9599,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { err = fakeOvn.controller.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} - node2Switch.QOSRules = []string{"egressip-QoS-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID"} - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4, node2IPv4}) expectedDatabaseState := []libovsdbtest.TestData{ 
@@ -9608,27 +9611,30 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", - Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -9669,7 +9675,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "-UUID"}, }, node1Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), node2Switch, egressSVCServedPodsASv4, egressIPServedPodsASv4, @@ -9690,27 +9696,30 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: 
getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", - Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -9751,7 +9760,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "-UUID"}, }, node1Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), node2Switch, egressSVCServedPodsASv4, egressIPServedPodsASv4, @@ -9787,7 +9796,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { EgressIP: egressIP2, }, } - err = fakeOvn.controller.patchReplaceEgressIPStatus(egressIPName, status) + err = fakeOvn.controller.eIPC.patchReplaceEgressIPStatus(egressIPName, status) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(2)) @@ -9798,27 +9807,30 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, 
fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", - Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -9859,7 +9871,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "-UUID"}, }, node1Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), node2Switch, egressSVCServedPodsASv4, egressIPServedPodsASv4, @@ -9882,10 +9894,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { node1IPv4CIDR := node1IPv4 + "/24" egressPod := *newPodWithLabels(eipNamespace, podName, node1Name, podV4IP, egressPodLabel) - egressPod.Status.Phase = v1.PodSucceeded + egressPod.Status.Phase = corev1.PodSucceeded egressNamespace := newNamespace(eipNamespace) - node1 := v1.Node{ + node1 := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: node1Name, Annotations: map[string]string{ @@ -9899,11 +9911,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { "k8s.ovn.org/egress-assignable": "", }, }, - Status: v1.NodeStatus{ - Conditions: []v1.NodeCondition{ + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ { - Type: v1.NodeReady, - Status: v1.ConditionTrue, + Type: corev1.NodeReady, + Status: corev1.ConditionTrue, }, }, }, @@ -9971,14 +9983,14 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1}, + &corev1.NodeList{ + Items: []corev1.Node{node1}, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, }, ) i, n, _ := net.ParseCIDR(podV4IP + "/23") @@ -10014,35 +10026,38 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { 
gomega.Expect(nodes[0]).To(gomega.Equal(node1.Name)) gomega.Expect(egressIPs[0]).To(gomega.Equal(egressIP)) - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4}) - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID"} expectedDatabaseStatewithPod := []libovsdbtest.TestData{ &nbdb.LogicalRouterPolicy{ Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", - Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, node1GR, node1LSP, &nbdb.LogicalRouterPort{ UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID", @@ -10054,7 +10069,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.ExternalSwitchPrefix + node1Name, Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, }, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, 
fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -10079,7 +10094,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { egressPod := *newPodWithLabels(eipNamespace, podName, node1Name, podV4IP, egressPodLabel) egressNamespace := newNamespace(eipNamespace) - node1 := v1.Node{ + node1 := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: node1Name, Annotations: map[string]string{ @@ -10093,11 +10108,11 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { "k8s.ovn.org/egress-assignable": "", }, }, - Status: v1.NodeStatus{ - Conditions: []v1.NodeCondition{ + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ { - Type: v1.NodeReady, - Status: v1.ConditionTrue, + Type: corev1.NodeReady, + Status: corev1.ConditionTrue, }, }, }, @@ -10165,14 +10180,14 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1}, + &corev1.NodeList{ + Items: []corev1.Node{node1}, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, }, ) @@ -10211,12 +10226,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { gomega.Expect(egressIPs[0]).To(gomega.Equal(egressIP)) podEIPSNAT := &nbdb.NAT{ - UUID: "egressip-nat-UUID1", - LogicalIP: podV4IP, - ExternalIP: egressIP, - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID1", + LogicalIP: podV4IP, + ExternalIP: egressIP, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: utilpointer.StringPtr("k8s-node1"), Options: map[string]string{ @@ -10224,46 +10237,47 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, } podReRoutePolicy := &nbdb.LogicalRouterPolicy{ - Priority: types.EgressIPReroutePriority, - Match: fmt.Sprintf("ip4.src == %s", egressPodIP[0].String()), - Action: nbdb.LogicalRouterPolicyActionReroute, - Nexthops: nodeLogicalRouterIPv4, - ExternalIDs: map[string]string{ - "name": egressIPName, - }, - UUID: "reroute-UUID1", + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPodIP[0].String()), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: nodeLogicalRouterIPv4, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID1", } node1GR.Nat = []string{"egressip-nat-UUID1"} - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4}) - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID"} expectedDatabaseStatewithPod := []libovsdbtest.TestData{ &nbdb.LogicalRouterPolicy{ Priority: types.DefaultNoRereoutePriority, Match: 
fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), podEIPSNAT, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, podReRoutePolicy, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "reroute-UUID1", "default-no-reroute-node-UUID", - "egressip-no-reroute-reply-traffic"}, + "default-no-reroute-reply-traffic"}, }, node1GR, node1LSP, &nbdb.LogicalRouterPort{ UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID", @@ -10275,7 +10289,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.ExternalSwitchPrefix + node1Name, Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, }, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -10283,42 +10297,45 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseStatewithPod)) - egressPod.Status.Phase = v1.PodSucceeded + egressPod.Status.Phase = corev1.PodSucceeded _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(egressPod.Namespace).Update(context.TODO(), &egressPod, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // Wait for pod to get moved into succeeded state. 
- gomega.Eventually(func() v1.PodPhase { + gomega.Eventually(func() corev1.PodPhase { egressPod1, _ := fakeOvn.watcher.GetPod(egressPod.Namespace, egressPod.Name) return egressPod1.Status.Phase - }, 5).Should(gomega.Equal(v1.PodSucceeded)) + }, 5).Should(gomega.Equal(corev1.PodSucceeded)) node1GR.Nat = []string{} - egressIPServedPodsASv4, _ = buildEgressIPServedPodsAddressSets(nil) + egressIPServedPodsASv4, _ = buildEgressIPServedPodsAddressSets(nil, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) expectedDatabaseStatewitCompletedPod := []libovsdbtest.TestData{ &nbdb.LogicalRouterPolicy{ Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", - Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, node1GR, node1LSP, &nbdb.LogicalRouterPort{ UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID", @@ -10330,7 +10347,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.ExternalSwitchPrefix + node1Name, Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, }, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, @@ -10345,6 
+10362,331 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) + ginkgo.DescribeTable( + "DualStack cluster with single stack egressIP removes the correct snat rule when DisableSNATMultipleGWs=true", + func( + egressIP net.IP, + ) { + app.Action = func(ctx *cli.Context) error { + ctx.Set("enable-egress-ip", "true") + ctx.Set("egressip-node-healthcheck-port", "1234") + + ctx.Set("disable-snat-multiple-gws", "true") + ctx.Set("cluster-subnets", fmt.Sprintf("%s/%d,%s/%d", "10.0.0.0/16", 24, "fd01::/48", 64)) + ctx.Set("k8s-service-cidrs", fmt.Sprintf("%s,%s", "172.30.0.0/16", "fd02::/112")) + + _, err := config.InitConfig(ctx, nil, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + isEgressIPv6 := utilnet.IsIPv6(egressIP) + node1IPv4 := "192.168.126.12" + node1IPv4Net := "192.168.126.0/24" + node1IPv4CIDR := node1IPv4 + "/24" + expectedNatLogicalPort := "k8s-node1" + + egressPod := *newPodWithLabelsAllIPFamilies(eipNamespace, podName, node1Name, []string{podV4IP, podV6IP}, egressPodLabel) + egressNamespace := newNamespace(eipNamespace) + + node1 := corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: node1Name, + Annotations: map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"fc00:f853:ccd:e793::13/64\"}", node1IPv4CIDR), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":[\"%s\", \"%s\"]}", v4Node1Subnet, v6Node1Subnet), + "k8s.ovn.org/l3-gateway-config": "{\"default\":{\"mode\":\"shared\",\"bridge-id\":\"breth0\",\"interface-id\":\"breth0_ovn-worker\",\"mac-address\":\"3a:24:24:5d:85:aa\",\"ip-addresses\":[\"10.89.0.19/24\",\"fc00:f853:ccd:e793::13/64\"],\"next-hops\":[\"10.89.0.1\",\"fc00:f853:ccd:e793::1\"],\"node-port-enable\":\"true\",\"vlan-id\":\"0\"}}", + "k8s.ovn.org/node-chassis-id": "79fdcfc4-6fe6-4cd3-8242-c0f85a4668ec", + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\",\"%s\"]", node1IPv4CIDR, "fc00:f853:ccd:e793::13/64"), + }, + Labels: map[string]string{ + "k8s.ovn.org/egress-assignable": "", + }, + }, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + { + Type: corev1.NodeReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + + eIP := egressipv1.EgressIP{ + ObjectMeta: newEgressIPMeta(egressIPName), + Spec: egressipv1.EgressIPSpec{ + EgressIPs: []string{egressIP.String()}, + PodSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + NamespaceSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": egressNamespace.Name, + }, + }, + }, + Status: egressipv1.EgressIPStatus{ + Items: []egressipv1.EgressIPStatusItem{}, + }, + } + + node1Switch := &nbdb.LogicalSwitch{ + UUID: node1.Name + "-UUID", + Name: node1.Name, + } + node1GR := &nbdb.LogicalRouter{ + Name: types.GWRouterPrefix + node1.Name, + UUID: types.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID"}, + Nat: []string{"egressip-nat-v4-UUID", "egressip-nat-v6-UUID"}, + } + node1LSP := &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "-UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + "nat-addresses": "router", + "exclude-lb-vips-from-garp": "true", + }, + } + egressipv4NAT := &nbdb.NAT{ + UUID: "egressip-nat-v4-UUID", + LogicalIP: podV4IP, + ExternalIP: 
"10.89.0.19", // nodes ip address + Type: nbdb.NATTypeSNAT, + LogicalPort: &expectedNatLogicalPort, + Options: map[string]string{ + "stateless": "false", + }, + } + egressipv6NAT := &nbdb.NAT{ + UUID: "egressip-nat-v6-UUID", + LogicalIP: podV6IP, + ExternalIP: "fc00:f853:ccd:e793::13", + Type: nbdb.NATTypeSNAT, + LogicalPort: &expectedNatLogicalPort, + Options: map[string]string{ + "stateless": "false", + }, + } + // we only want to remove the SNAT of the podIP that matches the ip family of the EgressIP + natToRemain := egressipv6NAT + if isEgressIPv6 { + natToRemain = egressipv4NAT + } + + fakeOvn.startWithDBSetup( + libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + &nbdb.LogicalRouter{ + Name: types.OVNClusterRouter, + UUID: types.OVNClusterRouter + "-UUID", + }, + node1GR, + node1LSP, + &nbdb.LogicalRouterPort{ + UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID", + Name: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4, nodeLogicalRouterIfAddrV6}, + }, + node1Switch, + &nbdb.LogicalSwitch{ + UUID: types.ExternalSwitchPrefix + node1Name + "-UUID", + Name: types.ExternalSwitchPrefix + node1Name, + Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, + }, + egressipv6NAT, + egressipv4NAT, + }, + }, + &egressipv1.EgressIPList{ + Items: []egressipv1.EgressIP{eIP}, + }, + &corev1.NodeList{ + Items: []corev1.Node{node1}, + }, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, + }, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, + }, + ) + + i, n, _ := net.ParseCIDR(podV4IP + "/23") + n.IP = i + ipv6, ipv6net, _ := net.ParseCIDR(podV6IP + "/23") + ipv6net.IP = ipv6 + fakeOvn.controller.logicalPortCache.add(&egressPod, "", types.DefaultNetworkName, "", nil, []*net.IPNet{n, ipv6net}) + + err = fakeOvn.controller.WatchPods() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIPNamespaces() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIPPods() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressNodes() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIP() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if !isEgressIPv6 { + fakeOvn.patchEgressIPObj(node1Name, egressIPName, egressIP.String(), node1IPv4Net) + } else { + fakeOvn.patchEgressIPObj(node1Name, egressIPName, egressIP.String(), "fc00:f853:ccd:e793::0/64") + } + + egressPodPortInfo, err := fakeOvn.controller.logicalPortCache.get(&egressPod, types.DefaultNetworkName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ePod, err := fakeOvn.fakeClient.KubeClient.CoreV1().Pods(egressPod.Namespace).Get(context.TODO(), egressPod.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + egressPodIP, err := util.GetPodIPsOfNetwork(ePod, &util.DefaultNetInfo{}) + index := 0 //ipv4 address at zero index + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + egressNetPodIP, _, err := net.ParseCIDR(egressPodPortInfo.ips[0].String()) + if isEgressIPv6 { + egressNetPodIP, _, err = net.ParseCIDR(egressPodPortInfo.ips[1].String()) + index = 1 + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(egressNetPodIP.String()).To(gomega.Equal(egressPodIP[index].String())) + gomega.Expect(egressPodPortInfo.expires.IsZero()).To(gomega.BeTrue()) + + 
gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(1)) + gomega.Eventually(getEgressIPReassignmentCount).Should(gomega.Equal(0)) + egressIPs, nodes := getEgressIPStatus(egressIPName) + gomega.Expect(nodes[0]).To(gomega.Equal(node1.Name)) + gomega.Expect(egressIPs[0]).To(gomega.Equal(egressIP.String())) + ipfamily := IPFamilyValueV4 + if isEgressIPv6 { + ipfamily = IPFamilyValueV6 + } + podEIPSNAT := &nbdb.NAT{ + UUID: "egressip-nat-UUID1", + ExternalIP: egressIP.String(), + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod.Namespace, egressPod.Name, ipfamily, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: utilpointer.StringPtr("k8s-node1"), + Options: map[string]string{ + "stateless": "false", + }, + } + if !isEgressIPv6 { + podEIPSNAT.LogicalIP = podV4IP + } else { + podEIPSNAT.LogicalIP = podV6IP + } + podReRoutePolicy := &nbdb.LogicalRouterPolicy{ + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("%s.src == %s", ipfamily, egressPodIP[index].String()), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: nodeLogicalRouterIPv4, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, ipfamily, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID1", + } + if isEgressIPv6 { + podReRoutePolicy.Nexthops = []string{"fef0::56"} + } + + node1GR.Nat = []string{"egressip-nat-UUID1", natToRemain.UUID} + node1Switch.QOSRules = []string{"default-QoS-UUID", "default-QoSv6-UUID"} + ovnClusterRouterPolicies := []string{"reroute-UUID1", "default-no-reroute-reply-traffic", "no-reroute-UUID", "no-reroute-v6-UUID", + "no-reroute-service-UUID", "no-reroute-service-v6-UUID", "default-no-reroute-node-UUID", "default-no-reroute-node-v6-UUID"} + egressSVCServedPodsASv4, egressSVCServedPodsASv6 := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, egressIPServedPodsASv6 := buildEgressIPServedPodsAddressSets([]string{podV4IP, podV6IP}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) + egressNodeIPsASv4, egressNodeIPsASv6 := buildEgressIPNodeAddressSets([]string{node1IPv4, "fc00:f853:ccd:e793::13"}) + expectedDatabaseStatewithPod := []libovsdbtest.TestData{ + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip6.src == $%s || ip6.src == $%s) && ip6.dst == $%s", + egressIPServedPodsASv6.Name, egressSVCServedPodsASv6.Name, egressNodeIPsASv6.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-v6-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV6, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.0.0.0/16 && ip4.dst == 10.0.0.0/16", + Action: 
nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: "ip6.src == fd01::/48 && ip6.dst == fd01::/48", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-v6-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV6, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.0.0.0/16 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip6.src == fd01::/48 && ip6.dst == %s", config.Gateway.V6JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-v6-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV6, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + }, + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), + podEIPSNAT, + podReRoutePolicy, &nbdb.LogicalRouter{ + Name: types.OVNClusterRouter, + UUID: types.OVNClusterRouter + "-UUID", + Policies: ovnClusterRouterPolicies, + }, node1GR, node1LSP, node1Switch, + &nbdb.LogicalSwitch{ + UUID: types.ExternalSwitchPrefix + node1Name + "-UUID", + Name: types.ExternalSwitchPrefix + node1Name, + Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, + }, + &nbdb.LogicalRouterPort{ + UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID", + Name: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4, nodeLogicalRouterIfAddrV6}, + }, + natToRemain, + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), + getDefaultQoSRule(true, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), + egressSVCServedPodsASv4, + egressSVCServedPodsASv6, + egressIPServedPodsASv4, + egressIPServedPodsASv6, + egressNodeIPsASv4, + egressNodeIPsASv6, + } + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseStatewithPod)) + + return nil + } + + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }, + ginkgo.Entry( + "When EgressIP is ipv4", + net.ParseIP("192.168.126.101"), + ), + ginkgo.Entry( + "When EgressIP is ipv6", + net.ParseIP("fc00:f853:0ccd:e793:ffff:ffff:ffff:0000"), + ), + ) + ginkgo.It("should ensure SNATs towards egressIP and nodeIP are correctly configured during egressIP re-assignment", func() { app.Action = func(ctx *cli.Context) error { config.Gateway.DisableSNATMultipleGWs = true @@ -10466,14 +10808,14 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1, node2}, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: 
[]corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod1, egressPod2}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod1, egressPod2}, }, ) @@ -10493,38 +10835,41 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { err = fakeOvn.controller.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - egressSVCServedPodsASv4, _ := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP, podV4IP2}) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP, podV4IP2}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4, node2IPv4}) - node1Switch.QOSRules = []string{"egressip-QoS-UUID"} - node2Switch.QOSRules = []string{"egressip-QoS-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID"} expectedDatabaseState := []libovsdbtest.TestData{ &nbdb.LogicalRouterPolicy{ Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", - Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, 
@@ -10578,7 +10923,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, node1Switch, node2Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, } gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) @@ -10603,48 +10948,47 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.EgressIPReroutePriority, - Match: fmt.Sprintf("ip4.src == %s", egressPod1.Status.PodIP), - Action: nbdb.LogicalRouterPolicyActionReroute, - Nexthops: []string{"100.64.0.2"}, - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, - UUID: "reroute-UUID1", + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod1.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: []string{"100.64.0.2"}, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID1", }, &nbdb.LogicalRouterPolicy{ - Priority: types.EgressIPReroutePriority, - Match: fmt.Sprintf("ip4.src == %s", egressPod2.Status.PodIP), - Action: nbdb.LogicalRouterPolicyActionReroute, - Nexthops: []string{"100.64.0.2"}, - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, - UUID: "reroute-UUID2", + Priority: 
types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod2.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: []string{"100.64.0.2"}, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod2.Namespace, egressPod2.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID2", }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "reroute-UUID1", - "reroute-UUID2", "egressip-no-reroute-reply-traffic"}, + "reroute-UUID2", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -10658,12 +11002,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Ports: []string{types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node2.Name + "-UUID"}, }, &nbdb.NAT{ - UUID: "egressip-nat-UUID1", - LogicalIP: podV4IP, - ExternalIP: eips[0], - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID1", + LogicalIP: podV4IP, + ExternalIP: eips[0], + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort1, Options: map[string]string{ @@ -10671,12 +11013,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, }, &nbdb.NAT{ - UUID: "egressip-nat-UUID2", - LogicalIP: "10.128.0.16", - ExternalIP: eips[0], - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID2", + LogicalIP: "10.128.0.16", + ExternalIP: eips[0], + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod2.Namespace, egressPod2.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort1, Options: map[string]string{ @@ -10725,7 +11065,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, node1Switch, node2Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, } gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) @@ -10750,7 +11090,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { EgressIP: egressIP2, }, } - err = fakeOvn.controller.patchReplaceEgressIPStatus(egressIPName, status) + err = fakeOvn.controller.eIPC.patchReplaceEgressIPStatus(egressIPName, status) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(2)) gomega.Eventually(getEgressIPReassignmentCount).Should(gomega.Equal(0)) @@ -10765,50 +11105,47 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: 
getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.EgressIPReroutePriority, - Match: fmt.Sprintf("ip4.src == %s", egressPod1.Status.PodIP), - Action: nbdb.LogicalRouterPolicyActionReroute, - Nexthops: []string{"100.64.0.2", "100.64.0.3"}, - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, - UUID: "reroute-UUID1", + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod1.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: []string{"100.64.0.2", "100.64.0.3"}, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID1", }, &nbdb.LogicalRouterPolicy{ - Priority: types.EgressIPReroutePriority, - Match: fmt.Sprintf("ip4.src == %s", egressPod2.Status.PodIP), - Action: nbdb.LogicalRouterPolicyActionReroute, - Nexthops: []string{"100.64.0.2", "100.64.0.3"}, - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, - UUID: "reroute-UUID2", + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod2.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: []string{"100.64.0.2", "100.64.0.3"}, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod2.Namespace, egressPod2.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID2", }, &nbdb.NAT{ - UUID: "egressip-nat-UUID1", - LogicalIP: podV4IP, - ExternalIP: eips[0], - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID1", + LogicalIP: podV4IP, + ExternalIP: eips[0], + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort1, Options: map[string]string{ @@ -10816,12 +11153,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, }, &nbdb.NAT{ - UUID: 
"egressip-nat-UUID2", - LogicalIP: "10.128.0.16", - ExternalIP: eips[0], - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID2", + LogicalIP: "10.128.0.16", + ExternalIP: eips[0], + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod2.Namespace, egressPod2.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort1, Options: map[string]string{ @@ -10829,12 +11164,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, }, &nbdb.NAT{ - UUID: "egressip-nat-UUID3", - LogicalIP: podV4IP, - ExternalIP: eips[1], - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID3", + LogicalIP: podV4IP, + ExternalIP: eips[1], + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort2, Options: map[string]string{ @@ -10842,12 +11175,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, }, &nbdb.NAT{ - UUID: "egressip-nat-UUID4", - LogicalIP: "10.128.0.16", - ExternalIP: eips[1], - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID4", + LogicalIP: "10.128.0.16", + ExternalIP: eips[1], + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod2.Namespace, egressPod2.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort2, Options: map[string]string{ @@ -10858,7 +11189,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "reroute-UUID1", - "reroute-UUID2", "egressip-no-reroute-reply-traffic"}, + "reroute-UUID2", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -10914,7 +11245,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, node1Switch, node2Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, } gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) @@ -10935,50 +11266,47 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: 
nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.EgressIPReroutePriority, - Match: fmt.Sprintf("ip4.src == %s", egressPod1.Status.PodIP), - Action: nbdb.LogicalRouterPolicyActionReroute, - Nexthops: nodeLogicalRouterIPv4, - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, - UUID: "reroute-UUID1", + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod1.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: nodeLogicalRouterIPv4, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID1", }, &nbdb.LogicalRouterPolicy{ - Priority: types.EgressIPReroutePriority, - Match: fmt.Sprintf("ip4.src == %s", egressPod2.Status.PodIP), - Action: nbdb.LogicalRouterPolicyActionReroute, - Nexthops: nodeLogicalRouterIPv4, - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, - UUID: "reroute-UUID2", + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod2.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: nodeLogicalRouterIPv4, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod2.Namespace, egressPod2.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID2", }, &nbdb.NAT{ - UUID: "egressip-nat-UUID1", - LogicalIP: podV4IP, - ExternalIP: eips[0], - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID1", + LogicalIP: podV4IP, + ExternalIP: eips[0], + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort1, Options: map[string]string{ @@ -10986,12 +11314,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, }, &nbdb.NAT{ - UUID: "egressip-nat-UUID2", - LogicalIP: "10.128.0.16", - ExternalIP: eips[0], - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID2", + LogicalIP: "10.128.0.16", + ExternalIP: eips[0], + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod2.Namespace, egressPod2.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort1, Options: map[string]string{ @@ -11011,7 +11337,7 @@ var _ = 
ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", "reroute-UUID1", "reroute-UUID2", - "egressip-no-reroute-reply-traffic"}, + "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -11067,7 +11393,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, node1Switch, node2Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, } gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) @@ -11082,35 +11408,38 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { // For the sake of unit testing egressip zone controller we need to patch egressIP object manually // There are tests in cluster-manager package covering the patch logic. status = []egressipv1.EgressIPStatusItem{} - err = fakeOvn.controller.patchReplaceEgressIPStatus(egressIPName, status) + err = fakeOvn.controller.eIPC.patchReplaceEgressIPStatus(egressIPName, status) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(0)) gomega.Eventually(getEgressIPReassignmentCount).Should(gomega.Equal(1)) // though 2 egressIPs to be re-assigned its only 1 egressIP object - egressIPServedPodsASv4, _ = buildEgressIPServedPodsAddressSets(nil) + egressIPServedPodsASv4, _ = buildEgressIPServedPodsAddressSets(nil, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) expectedDatabaseState = []libovsdbtest.TestData{ &nbdb.LogicalRouterPolicy{ Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: 
getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.NAT{ UUID: "egressip-nat-UUID1", LogicalIP: podV4IP, @@ -11133,7 +11462,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", - "egressip-no-reroute-reply-traffic"}, + "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -11189,7 +11518,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, node1Switch, node2Switch, - getDefaultQoSRule(false), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressIPServedPodsASv4, egressNodeIPsASv4, } gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) @@ -11359,14 +11688,14 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { &egressipv1.EgressIPList{ Items: []egressipv1.EgressIP{eIP}, }, - &v1.NodeList{ - Items: []v1.Node{node1, node3}, + &corev1.NodeList{ + Items: []corev1.Node{node1, node3}, }, - &v1.NamespaceList{ - Items: []v1.Namespace{*egressNamespace}, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, }, - &v1.PodList{ - Items: []v1.Pod{egressPod1}, + &corev1.PodList{ + Items: []corev1.Pod{egressPod1}, }) i, n, _ := net.ParseCIDR(podV4IP + "/23") @@ -11382,29 +11711,31 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { err = fakeOvn.controller.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - egressSVCServedPodsASv4, egressSVCServedPodsASv6 := buildEgressIPServiceAddressSets(nil) - egressIPServedPodsASv4, egressIPServedPodsASv6 := buildEgressIPServedPodsAddressSets([]string{podV4IP}) + egressSVCServedPodsASv4, egressSVCServedPodsASv6 := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, egressIPServedPodsASv6 := buildEgressIPServedPodsAddressSets([]string{podV4IP}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) egressNodeIPsASv4, egressNodeIPsASv6 := buildEgressIPNodeAddressSets([]string{node1IPv4, node1IPv6, node3IPv4}) - node1Switch.QOSRules = []string{"egressip-QoS-UUID", "egressip-QoSv6-UUID"} - node3Switch.QOSRules = []string{"egressip-QoS-UUID", "egressip-QoSv6-UUID"} + node1Switch.QOSRules = []string{"default-QoS-UUID", "default-QoSv6-UUID"} + node3Switch.QOSRules = []string{"default-QoS-UUID", "default-QoSv6-UUID"} expectedDatabaseState := []libovsdbtest.TestData{ &nbdb.LogicalRouterPolicy{ Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + 
getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip6.src == $%s || ip6.src == $%s) && ip6.dst == $%s", egressIPServedPodsASv6.Name, egressSVCServedPodsASv6.Name, egressNodeIPsASv6.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-v6-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-v6-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV6, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPort{ UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID", @@ -11422,22 +11753,24 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Networks: []string{node3LogicalRouterIfAddrV4}, }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouter{ Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", - "default-v6-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic"}, + "default-v6-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -11502,8 +11835,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { node1Switch, node2Switch, node3Switch, - getDefaultQoSRule(false), - getDefaultQoSRule(true), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), + getDefaultQoSRule(true, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressSVCServedPodsASv6, egressIPServedPodsASv4, egressIPServedPodsASv6, egressNodeIPsASv4, egressNodeIPsASv6, } gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) @@ -11518,7 +11851,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { EgressIP: egressIP2, }, } - err = fakeOvn.controller.patchReplaceEgressIPStatus(egressIPName, status) + err = fakeOvn.controller.eIPC.patchReplaceEgressIPStatus(egressIPName, status) 
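+				// With the status patched to two assignments, the controller should
+				// converge to two status items and zero pending reassignments.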
gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(2)) gomega.Eventually(getEgressIPReassignmentCount).Should(gomega.Equal(0)) @@ -11529,7 +11862,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Nodes().Create(context.TODO(), &node2, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - node2Switch.QOSRules = []string{"egressip-QoS-UUID", "egressip-QoSv6-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID", "default-QoSv6-UUID"} egressNodeIPsASv4, egressNodeIPsASv6 = buildEgressIPNodeAddressSets([]string{node1IPv4, node1IPv6, node2IPv4, node3IPv4}) expectedNatLogicalPort1 := "k8s-node1" expectedNatLogicalPort3 := "k8s-node3" @@ -11538,18 +11871,20 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip6.src == $%s || ip6.src == $%s) && ip6.dst == $%s", egressIPServedPodsASv6.Name, egressSVCServedPodsASv6.Name, egressNodeIPsASv6.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-v6-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-v6-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV6, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPort{ UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID", @@ -11567,34 +11902,33 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Networks: []string{node3LogicalRouterIfAddrV4}, }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: 
fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ Priority: types.EgressIPReroutePriority, Match: fmt.Sprintf("ip4.src == %s", egressPod1.Status.PodIP), Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{"100.64.0.2", "100.64.0.4"}, - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, + ExternalIDs: getEgressIPLRPReRouteDbIDs(egressIPName, egressPod1.Namespace, egressPod1.Name, + "ip4", types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), UUID: "reroute-UUID1", }, &nbdb.NAT{ - UUID: "egressip-nat-UUID1", - LogicalIP: podV4IP, - ExternalIP: egressIPs[0], - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID1", + LogicalIP: podV4IP, + ExternalIP: egressIPs[0], + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort1, Options: map[string]string{ @@ -11602,12 +11936,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, }, &nbdb.NAT{ - UUID: "egressip-nat-UUID2", - LogicalIP: podV4IP, - ExternalIP: egressIPs[1], - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID2", + LogicalIP: podV4IP, + ExternalIP: egressIPs[1], + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort3, Options: map[string]string{ @@ -11618,7 +11950,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", - "default-v6-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic", "reroute-UUID1"}, + "default-v6-no-reroute-node-UUID", "default-no-reroute-reply-traffic", "reroute-UUID1"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -11685,8 +12017,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { node1Switch, node2Switch, node3Switch, - getDefaultQoSRule(false), - getDefaultQoSRule(true), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), + getDefaultQoSRule(true, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressSVCServedPodsASv6, egressIPServedPodsASv4, egressIPServedPodsASv6, egressNodeIPsASv4, egressNodeIPsASv6, } gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) @@ -11709,7 +12041,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { EgressIP: egressIP2, }, } - err = fakeOvn.controller.patchReplaceEgressIPStatus(egressIPName, status) + err = fakeOvn.controller.eIPC.patchReplaceEgressIPStatus(egressIPName, status) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(2)) gomega.Eventually(getEgressIPReassignmentCount).Should(gomega.Equal(0)) @@ -11725,18 +12057,20 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", 
func() { Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), &nbdb.LogicalRouterPolicy{ Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip6.src == $%s || ip6.src == $%s) && ip6.dst == $%s", egressIPServedPodsASv6.Name, egressSVCServedPodsASv6.Name, egressNodeIPsASv6.Name), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-v6-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-v6-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV6, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPort{ UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID", @@ -11754,34 +12088,33 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Networks: []string{node3LogicalRouterIfAddrV4}, }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ - Priority: types.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), }, &nbdb.LogicalRouterPolicy{ Priority: types.EgressIPReroutePriority, Match: fmt.Sprintf("ip4.src == %s", egressPod1.Status.PodIP), Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{"100.64.0.3", "100.64.0.4"}, - ExternalIDs: map[string]string{ - "name": eIP.Name, - }, + ExternalIDs: getEgressIPLRPReRouteDbIDs(egressIPName, egressPod1.Namespace, egressPod1.Name, + "ip4", types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), UUID: "reroute-UUID1", }, &nbdb.NAT{ - UUID: "egressip-nat-UUID1", - LogicalIP: podV4IP, - 
ExternalIP: egressIPs[0], - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID1", + LogicalIP: podV4IP, + ExternalIP: egressIPs[0], + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort2, Options: map[string]string{ @@ -11789,12 +12122,10 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { }, }, &nbdb.NAT{ - UUID: "egressip-nat-UUID2", - LogicalIP: podV4IP, - ExternalIP: egressIPs[1], - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID2", + LogicalIP: podV4IP, + ExternalIP: egressIPs[1], + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod1.Namespace, egressPod1.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort3, Options: map[string]string{ @@ -11805,7 +12136,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { Name: types.OVNClusterRouter, UUID: types.OVNClusterRouter + "-UUID", Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "default-no-reroute-node-UUID", - "default-v6-no-reroute-node-UUID", "egressip-no-reroute-reply-traffic", "reroute-UUID1"}, + "default-v6-no-reroute-node-UUID", "default-no-reroute-reply-traffic", "reroute-UUID1"}, }, &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, @@ -11872,8 +12203,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { node1Switch, node2Switch, node3Switch, - getDefaultQoSRule(false), - getDefaultQoSRule(true), + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), + getDefaultQoSRule(true, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), egressSVCServedPodsASv4, egressSVCServedPodsASv6, egressIPServedPodsASv4, egressIPServedPodsASv6, egressNodeIPsASv4, egressNodeIPsASv6, } gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) @@ -11883,38 +12214,41 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) + }) }) // TEST UTILITY FUNCTIONS; // reduces redundant code -func getDefaultQoSRule(isv6 bool) *nbdb.QoS { - egressipPodsV4, egressipPodsV6 := addressset.GetHashNamesForAS(getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, DefaultNetworkControllerName)) +func getDefaultQoSRule(isv6 bool, network, controller string) *nbdb.QoS { + egressipPodsV4, egressipPodsV6 := addressset.GetHashNamesForAS(getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, network, controller)) qos := &nbdb.QoS{ Priority: types.EgressIPRerouteQoSRulePriority, Action: map[string]int{"mark": types.EgressIPReplyTrafficConnectionMark}, - ExternalIDs: getEgressIPQoSRuleDbIDs(IPFamilyValueV4).GetExternalIDs(), + ExternalIDs: getEgressIPQoSRuleDbIDs(IPFamilyValueV4, network, controller).GetExternalIDs(), Direction: nbdb.QoSDirectionFromLport, - UUID: "egressip-QoS-UUID", + UUID: fmt.Sprintf("%s-QoS-UUID", network), Match: fmt.Sprintf(`ip4.src == $%s && ct.trk && ct.rpl`, egressipPodsV4), } if isv6 { - qos.UUID = "egressip-QoSv6-UUID" + qos.UUID = fmt.Sprintf("%s-QoSv6-UUID", network) qos.Match = fmt.Sprintf(`ip6.src == $%s && ct.trk && ct.rpl`, egressipPodsV6) - qos.ExternalIDs = getEgressIPQoSRuleDbIDs(IPFamilyValueV6).GetExternalIDs() + qos.ExternalIDs = 
getEgressIPQoSRuleDbIDs(IPFamilyValueV6, network, controller).GetExternalIDs() } return qos } -func getEIPSNAT(podIP, egressIP, expectedNatLogicalPort string) *nbdb.NAT { +func getEIPSNAT(podIP, podNamespace, podName, egressIP, expectedNatLogicalPort, controllerName string) *nbdb.NAT { + ipFamily := IPFamilyValueV4 + if utilnet.IsIPv6String(podIP) { + ipFamily = IPFamilyValueV6 + } return &nbdb.NAT{ - UUID: "egressip-nat-UUID", - LogicalIP: podIP, - ExternalIP: egressIP, - ExternalIDs: map[string]string{ - "name": egressIPName, - }, + UUID: "egressip-nat-UUID", + LogicalIP: podIP, + ExternalIP: egressIP, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, podNamespace, podName, ipFamily, controllerName).GetExternalIDs(), Type: nbdb.NATTypeSNAT, LogicalPort: &expectedNatLogicalPort, Options: map[string]string{ @@ -11923,13 +12257,13 @@ func getEIPSNAT(podIP, egressIP, expectedNatLogicalPort string) *nbdb.NAT { } } -func getNoReRouteReplyTrafficPolicy() *nbdb.LogicalRouterPolicy { +func getNoReRouteReplyTrafficPolicy(network, controller string) *nbdb.LogicalRouterPolicy { return &nbdb.LogicalRouterPolicy{ Priority: types.DefaultNoRereoutePriority, Match: fmt.Sprintf("pkt.mark == %d", types.EgressIPReplyTrafficConnectionMark), Action: nbdb.LogicalRouterPolicyActionAllow, - ExternalIDs: getEgressIPLRPNoReRouteDbIDs(types.DefaultNoRereoutePriority, ReplyTrafficNoReroute, IPFamilyValue).GetExternalIDs(), - UUID: "egressip-no-reroute-reply-traffic", + ExternalIDs: getEgressIPLRPNoReRouteDbIDs(types.DefaultNoRereoutePriority, ReplyTrafficNoReroute, IPFamilyValue, network, controller).GetExternalIDs(), + UUID: fmt.Sprintf("%s-no-reroute-reply-traffic", network), } } @@ -11953,52 +12287,52 @@ func getReRouteStaticRoute(clusterSubnet, nextHop string) *nbdb.LogicalRouterSta } } -func getNodeObj(nodeName string, annotations, labels map[string]string) v1.Node { - return v1.Node{ +func getNodeObj(nodeName string, annotations, labels map[string]string) corev1.Node { + return corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, Annotations: annotations, Labels: labels, }, - Status: v1.NodeStatus{ - Conditions: []v1.NodeCondition{ + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ { - Type: v1.NodeReady, - Status: v1.ConditionTrue, + Type: corev1.NodeReady, + Status: corev1.ConditionTrue, }, }, }, } } -func getSwitchManagementPortIP(node *v1.Node) (net.IP, error) { +func getSwitchManagementPortIP(node *corev1.Node) (net.IP, error) { // fetch node annotation of the egress node - networkName := "default" - ipNets, err := util.ParseNodeHostSubnetAnnotation(node, networkName) + network := "default" + ipNets, err := util.ParseNodeHostSubnetAnnotation(node, network) if err != nil { return nil, fmt.Errorf("failed to parse node (%s) subnets to get management port IP: %v", node.Name, err) } - for _, ipnet := range ipNets { - return util.GetNodeManagementIfAddr(ipnet).IP, nil + for _, ipNet := range ipNets { + return util.GetNodeManagementIfAddr(ipNet).IP, nil } return nil, fmt.Errorf("failed to find management port IP for node %s", node.Name) } // returns the address set with externalID "k8s.ovn.org/name": "egresssvc-served-pods" -func buildEgressIPServiceAddressSets(ips []string) (*nbdb.AddressSet, *nbdb.AddressSet) { +func buildEgressServiceAddressSets(ips []string) (*nbdb.AddressSet, *nbdb.AddressSet) { dbIDs := egresssvc.GetEgressServiceAddrSetDbIDs(DefaultNetworkControllerName) return addressset.GetTestDbAddrSets(dbIDs, ips) } // returns the address set with externalID "k8s.ovn.org/name": 
"egressip-served-pods"" -func buildEgressIPServedPodsAddressSets(ips []string) (*nbdb.AddressSet, *nbdb.AddressSet) { - dbIDs := getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, DefaultNetworkControllerName) +func buildEgressIPServedPodsAddressSets(ips []string, network, controller string) (*nbdb.AddressSet, *nbdb.AddressSet) { + dbIDs := getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, network, controller) return addressset.GetTestDbAddrSets(dbIDs, ips) } // returns the address set with externalID "k8s.ovn.org/name": "node-ips" func buildEgressIPNodeAddressSets(ips []string) (*nbdb.AddressSet, *nbdb.AddressSet) { - dbIDs := getEgressIPAddrSetDbIDs(NodeIPAddrSetName, DefaultNetworkControllerName) + dbIDs := getEgressIPAddrSetDbIDs(NodeIPAddrSetName, types.DefaultNetworkName, DefaultNetworkControllerName) return addressset.GetTestDbAddrSets(dbIDs, ips) } diff --git a/go-controller/pkg/ovn/egressip_udn_l3_test.go b/go-controller/pkg/ovn/egressip_udn_l3_test.go new file mode 100644 index 0000000000..b0e4f6b7dc --- /dev/null +++ b/go-controller/pkg/ovn/egressip_udn_l3_test.go @@ -0,0 +1,2712 @@ +package ovn + +import ( + "context" + "encoding/json" + "fmt" + "net" + + ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + egressipv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/udnenabledsvc" + libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" + ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + k8stypes "k8s.io/apimachinery/pkg/types" + + cnitypes "github.com/containernetworking/cni/pkg/types" + nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "github.com/urfave/cli/v2" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" +) + +var _ = ginkgo.Describe("EgressIP Operations for user defined network with topology L3", func() { + var ( + app *cli.App + fakeOvn *FakeOVN + ) + + const ( + nadName1 = "nad1" + networkName1 = "network1" + networkName1_ = networkName1 + "_" + node1Name = "node1" + v4Net1 = "20.128.0.0/14" + v4Node1Net1 = "20.128.0.0/16" + v4Pod1IPNode1Net1 = "20.128.0.5" + podName3 = "egress-pod3" + v4Pod2IPNode1Net1 = "20.128.0.6" + v4Node1Tsp = "100.88.0.2" + node2Name = "node2" + v4Node2Net1 = "20.129.0.0/16" + v4Node2Tsp = "100.88.0.3" + podName4 = "egress-pod4" + v4Pod1IPNode2Net1 = "20.129.0.2" + v4Pod2IPNode2Net1 = "20.129.0.3" + eIP1Mark = 50000 + eIP2Mark = 50001 + ) + + getEgressIPStatusLen := func(egressIPName string) func() int { + return func() int { + tmp, err := fakeOvn.fakeClient.EgressIPClient.K8sV1().EgressIPs().Get(context.TODO(), egressIPName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return len(tmp.Status.Items) + } + } + + getIPNetWithIP := func(cidr string) *net.IPNet { + ip, ipNet, err := net.ParseCIDR(cidr) + if err != nil { + panic(err.Error()) + } + ipNet.IP = ip + return ipNet + } + + setPrimaryNetworkAnnot := func(pod *corev1.Pod, nadName, cidr string) { + var err error + hwAddr, _ := 
net.ParseMAC("00:00:5e:00:53:01") + pod.Annotations, err = util.MarshalPodAnnotation(pod.Annotations, + &util.PodAnnotation{ + IPs: []*net.IPNet{getIPNetWithIP(cidr)}, + MAC: hwAddr, + Role: "primary", + }, + nadName) + if err != nil { + panic(err.Error()) + } + } + + ginkgo.BeforeEach(func() { + // Restore global default values before each testcase + gomega.Expect(config.PrepareTestConfig()).Should(gomega.Succeed()) + config.OVNKubernetesFeature.EnableEgressIP = true + config.OVNKubernetesFeature.EnableNetworkSegmentation = true + config.OVNKubernetesFeature.EnableInterconnect = true + config.OVNKubernetesFeature.EnableMultiNetwork = true + config.Gateway.Mode = config.GatewayModeShared + config.OVNKubernetesFeature.EgressIPNodeHealthCheckPort = 1234 + + app = cli.NewApp() + app.Name = "test" + app.Flags = config.Flags + + fakeOvn = NewFakeOVN(false) + }) + + ginkgo.AfterEach(func() { + fakeOvn.shutdown() + // Restore global default values + gomega.Expect(config.PrepareTestConfig()).Should(gomega.Succeed()) + }) + + ginkgo.Context("sync", func() { + ginkgo.It("should remove stale LRPs for marks and configures missing LRP marks", func() { + app.Action = func(ctx *cli.Context) error { + // Node 1 is local, Node 2 is remote + egressIP1 := "192.168.126.101" + egressIP2 := "192.168.126.102" + node1IPv4 := "192.168.126.202" + node1IPv4CIDR := node1IPv4 + "/24" + node2IPv4 := "192.168.126.51" + node2IPv4CIDR := node2IPv4 + "/24" + _, node1CDNSubnet, _ := net.ParseCIDR(v4Node1Subnet) + _, node1UDNSubnet, _ := net.ParseCIDR(v4Node1Net1) + nadName := util.GetNADName(eipNamespace2, nadName1) + egressCDNNamespace := newNamespaceWithLabels(eipNamespace, egressPodLabel) + egressUDNNamespace := newNamespaceWithLabels(eipNamespace2, egressPodLabel) + egressPodCDNLocal := *newPodWithLabels(eipNamespace, podName, node1Name, podV4IP, egressPodLabel) + egressPodUDNLocal := *newPodWithLabels(eipNamespace2, podName2, node1Name, v4Pod1IPNode1Net1, egressPodLabel) + egressPodCDNRemote := *newPodWithLabels(eipNamespace, podName3, node2Name, podV4IP2, egressPodLabel) + setPrimaryNetworkAnnot(&egressPodCDNRemote, ovntypes.DefaultNetworkName, fmt.Sprintf("%s%s", podV4IP2, util.GetIPFullMaskString(podV4IP2))) + egressPodUDNRemote := *newPodWithLabels(eipNamespace2, podName4, node2Name, v4Pod2IPNode2Net1, egressPodLabel) + setPrimaryNetworkAnnot(&egressPodUDNRemote, nadName, fmt.Sprintf("%s%s", v4Pod2IPNode2Net1, util.GetIPFullMaskString(v4Pod2IPNode2Net1))) + netconf := ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{ + Name: networkName1, + Type: "ovn-k8s-cni-overlay", + }, + Role: ovntypes.NetworkRolePrimary, + Topology: ovntypes.Layer3Topology, + NADName: nadName, + Subnets: v4Net1, + } + nad, err := newNetworkAttachmentDefinition( + eipNamespace2, + nadName1, + netconf, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + netInfo, err := util.NewNetInfo(&netconf) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + node1Annotations := map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", node1IPv4CIDR, ""), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node1Subnet, networkName1, v4Node1Net1), + "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), + "k8s.ovn.org/zone-name": node1Name, + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + } + labels := map[string]string{ + "k8s.ovn.org/egress-assignable": "", + } + node1 := getNodeObj(node1Name, node1Annotations, 
labels) + node2Annotations := map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", node2IPv4CIDR, ""), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node2Subnet, networkName1, v4Node2Net1), + "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), + "k8s.ovn.org/zone-name": node2Name, + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + } + node2 := getNodeObj(node2Name, node2Annotations, labels) + eIP := egressipv1.EgressIP{ + ObjectMeta: newEgressIPMetaWithMark(egressIPName, eIP1Mark), + Spec: egressipv1.EgressIPSpec{ + EgressIPs: []string{egressIP1, egressIP2}, + PodSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + NamespaceSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + }, + Status: egressipv1.EgressIPStatus{ + Items: []egressipv1.EgressIPStatusItem{ + { + Node: node1Name, + EgressIP: egressIP1, + }, + { + Node: node2Name, + EgressIP: egressIP2, + }, + }, + }, + } + initialDB := []libovsdbtest.TestData{ + //CDN start + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID"}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1Name + "-UUID", + Name: "k8s-" + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1CDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + Ports: []string{"k8s-" + node1Name + "-UUID"}, + }, + // UDN start + //getGWPktMarkLRPForController(eIP1Mark, egressIPName, eipNamespace2, podName3, v4Pod2IPNode1Net1, IPFamilyValueV4, networkName1, DefaultNetworkControllerName), + //getGWPktMarkLRPForController(eIP2Mark, egressIPName, eipNamespace2, podName4, v4Pod1IPNode2Net1, IPFamilyValueV4, networkName1, DefaultNetworkControllerName), //stale EIP mark + //getGWPktMarkLRPForController(eIP2Mark, egressIPName, eipNamespace2, podName2, v4Pod1IPNode1Net1, IPFamilyValueV4, networkName1, DefaultNetworkControllerName), //stale EIP mark + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: netInfo.GetNetworkScopedClusterRouterName(), + UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + }, + &nbdb.LogicalRouter{ + UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedGWRouterName(node1.Name), + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, 
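+ // The commented-out pkt_mark LRP entries above (marked "stale EIP mark") document the stale state this sync test targets: stale mark LRPs are expected to be removed and the missing ones configured for the selected pods.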
+ //Policies: []string{getGWPktMarkLRPUUID(eipNamespace2, podName3, IPFamilyValueV4, networkName1)}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + networkName1_ + node1Name + "-UUID", + Name: "k8s-" + networkName1_ + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedSwitchName(node1.Name), + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + QOSRules: []string{}, + }, + } + fakeOvn.startWithDBSetup( + libovsdbtest.TestSetup{ + NBData: initialDB, + }, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, + }, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressCDNNamespace, *egressUDNNamespace}, + }, + &corev1.PodList{ + Items: []corev1.Pod{egressPodCDNLocal, egressPodUDNLocal, egressPodCDNRemote, egressPodUDNRemote}, + }, + &nadv1.NetworkAttachmentDefinitionList{ + Items: []nadv1.NetworkAttachmentDefinition{*nad}, + }, + &egressipv1.EgressIPList{ + Items: []egressipv1.EgressIP{eIP}, + }, + ) + asf := addressset.NewOvnAddressSetFactory(fakeOvn.nbClient, true, false) + // watch EgressIP depends on UDN enabled svcs address set being available + c := udnenabledsvc.NewController(fakeOvn.nbClient, asf, fakeOvn.controller.watchFactory.ServiceCoreInformer(), []string{}) + go func() { + gomega.Expect(c.Run(ctx.Done())).Should(gomega.Succeed()) + }() + // Add pod IPs to CDN cache + iCDN, nCDN, _ := net.ParseCIDR(podV4IP + "/23") + nCDN.IP = iCDN + fakeOvn.controller.logicalPortCache.add(&egressPodCDNLocal, "", ovntypes.DefaultNetworkName, "", nil, []*net.IPNet{nCDN}) + // Add pod IPs to UDN cache + iUDN, nUDN, _ := net.ParseCIDR(v4Pod1IPNode1Net1 + "/23") + nUDN.IP = iUDN + fakeOvn.controller.logicalPortCache.add(&egressPodUDNLocal, "", util.GetNADName(nad.Namespace, nad.Name), "", nil, []*net.IPNet{nUDN}) + fakeOvn.controller.eIPC.nodeZoneState.Store(node1Name, true) + fakeOvn.controller.eIPC.nodeZoneState.Store(node2Name, false) + fakeOvn.controller.eIPC.zone = node1.Name + fakeOvn.controller.zone = node1.Name + err = fakeOvn.eIPController.ensureL3ClusterRouterPoliciesForNetwork(netInfo) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.eIPController.ensureL3SwitchPoliciesForNode(netInfo, node1Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(fakeOvn.controller.eIPC.nadController.Start()).Should(gomega.Succeed()) + err = fakeOvn.controller.WatchEgressIPNamespaces() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIPPods() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressNodes() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIP() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + egressIPServedPodsASCDNv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, ovntypes.DefaultNetworkName, DefaultNetworkControllerName) + egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4, node2IPv4}) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASUDNv4, _ := buildEgressIPServedPodsAddressSetsForController([]string{v4Pod1IPNode1Net1}, netInfo.GetNetworkName(), DefaultNetworkControllerName) + 
gomega.Eventually(c.IsAddressSetAvailable).Should(gomega.BeTrue()) + dbIDs := udnenabledsvc.GetAddressSetDBIDs() + udnEnabledSvcV4, _ := addressset.GetTestDbAddrSets(dbIDs, []string{}) + + node1LRP := "k8s-node1" + expectedDatabaseStateTwoEgressNodes := []libovsdbtest.TestData{ + // CDN + getReRouteStaticRoute(v4ClusterSubnet, nodeLogicalRouterIPv4[0]), + getReRoutePolicy(podV4IP, "4", "reroute-UUID", []string{nodeLogicalRouterIPv4[0], v4Node2Tsp}, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPodCDNLocal.Namespace, egressPodCDNLocal.Name, IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs()), + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, v4ClusterSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID"}, + Nat: []string{"egressip-nat-UUID", "egressip-nat2-UUID"}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + Policies: []string{"default-no-reroute-UUID", "no-reroute-service-UUID", + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic", "reroute-UUID"}, + StaticRoutes: []string{"reroute-static-route-UUID"}, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{"100.64.0.2/29"}, + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASCDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1Name + "-UUID", + Name: "k8s-" + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1CDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + Ports: []string{"k8s-" + node1Name + "-UUID"}, + QOSRules: []string{"default-QoS-UUID"}, + }, + &nbdb.NAT{ + UUID: "egressip-nat-UUID", + LogicalIP: podV4IP2, + ExternalIP: egressIP1, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPodCDNRemote.Namespace, egressPodCDNRemote.Name, IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: 
&node1LRP, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.NAT{ + UUID: "egressip-nat2-UUID", + LogicalIP: podV4IP, + ExternalIP: egressIP1, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPodCDNLocal.Namespace, egressPodCDNLocal.Name, IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &node1LRP, + Options: map[string]string{ + "stateless": "false", + }, + }, + getNoReRouteReplyTrafficPolicy(ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + getDefaultQoSRule(false, ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + egressSVCServedPodsASv4, + egressIPServedPodsASCDNv4, + egressNodeIPsASv4, + + // UDN + getReRouteStaticRouteForController(v4Net1, nodeLogicalRouterIPv4[0], netInfo.GetNetworkName()), + getReRoutePolicyForController(egressIPName, eipNamespace2, podName2, v4Pod1IPNode1Net1, eIP1Mark, IPFamilyValueV4, []string{nodeLogicalRouterIPv4[0], v4Node2Tsp}, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getGWPktMarkLRPForController(eIP1Mark, egressIPName, eipNamespace2, podName2, v4Pod1IPNode1Net1, IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getGWPktMarkLRPForController(eIP1Mark, egressIPName, eipNamespace2, podName4, v4Pod2IPNode2Net1, IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getNoReRoutePolicyForUDNEnabledSvc(false, netInfo.GetNetworkName(), DefaultNetworkControllerName, egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, udnEnabledSvcV4.Name), + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, v4Net1), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: netInfo.GetNetworkScopedClusterRouterName(), + UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + Policies: []string{"udn-default-no-reroute-node-UUID", 
"udn-default-no-reroute-UUID", "udn-no-reroute-service-UUID", + fmt.Sprintf("%s-no-reroute-reply-traffic", netInfo.GetNetworkName()), "udn-enabled-svc-no-reroute-UUID", + getReRoutePolicyUUID(eipNamespace2, podName2, IPFamilyValueV4, netInfo.GetNetworkName())}, + StaticRoutes: []string{fmt.Sprintf("%s-reroute-static-route-UUID", netInfo.GetNetworkName())}, + }, + &nbdb.LogicalRouter{ + UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedGWRouterName(node1.Name), + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + Policies: []string{getGWPktMarkLRPUUID(eipNamespace2, podName2, IPFamilyValueV4, netInfo.GetNetworkName()), + getGWPktMarkLRPUUID(eipNamespace2, podName4, IPFamilyValueV4, netInfo.GetNetworkName())}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + networkName1_ + node1Name + "-UUID", + Name: "k8s-" + networkName1_ + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedSwitchName(node1.Name), + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + QOSRules: []string{fmt.Sprintf("%s-QoS-UUID", netInfo.GetNetworkName())}, + }, + getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), + getDefaultQoSRule(false, netInfo.GetNetworkName(), DefaultNetworkControllerName), + egressIPServedPodsASUDNv4, + udnEnabledSvcV4, + } + ginkgo.By("ensure expected equals actual") + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseStateTwoEgressNodes)) + return nil + } + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + }) + + ginkgo.Context("EgressIP update", func() { + ginkgo.It("should update UDN and CDN config", func() { + // Test steps: + // update an EIP selecting a pod on an UDN and another pod on a CDN + // EIP egresses locally and remote + // EIP egresses remote + // EIP egresses locally and remote + app.Action = func(ctx *cli.Context) error { + // Node 1 is local, Node 2 is remote + egressIP1 := "192.168.126.101" + egressIP2 := "192.168.126.102" + node1IPv4 := "192.168.126.202" + node1IPv4CIDR := node1IPv4 + "/24" + node2IPv4 := "192.168.126.51" + node2IPv4CIDR := node2IPv4 + "/24" + _, node1CDNSubnet, _ := net.ParseCIDR(v4Node1Subnet) + _, node1UDNSubnet, _ := net.ParseCIDR(v4Node1Net1) + nadName := util.GetNADName(eipNamespace2, nadName1) + egressCDNNamespace := newNamespaceWithLabels(eipNamespace, egressPodLabel) + egressUDNNamespace := newNamespaceWithLabels(eipNamespace2, egressPodLabel) + egressPodCDNLocal := *newPodWithLabels(eipNamespace, podName, node1Name, podV4IP, egressPodLabel) + egressPodUDNLocal := *newPodWithLabels(eipNamespace2, podName2, node1Name, v4Pod1IPNode1Net1, egressPodLabel) + egressPodCDNRemote := *newPodWithLabels(eipNamespace, podName3, node2Name, podV4IP2, egressPodLabel) + setPrimaryNetworkAnnot(&egressPodCDNRemote, ovntypes.DefaultNetworkName, fmt.Sprintf("%s%s", podV4IP2, util.GetIPFullMaskString(podV4IP2))) + egressPodUDNRemote := 
*newPodWithLabels(eipNamespace2, podName4, node2Name, v4Pod2IPNode2Net1, egressPodLabel) + setPrimaryNetworkAnnot(&egressPodUDNRemote, nadName, fmt.Sprintf("%s%s", v4Pod2IPNode2Net1, util.GetIPFullMaskString(v4Pod2IPNode2Net1))) + + netconf := ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{ + Name: networkName1, + Type: "ovn-k8s-cni-overlay", + }, + Role: ovntypes.NetworkRolePrimary, + Topology: ovntypes.Layer3Topology, + NADName: nadName, + Subnets: v4Net1, + } + nad, err := newNetworkAttachmentDefinition( + eipNamespace2, + nadName1, + netconf, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + netInfo, err := util.NewNetInfo(&netconf) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + node1Annotations := map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", node1IPv4CIDR, ""), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node1Subnet, networkName1, v4Node1Net1), + "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), + "k8s.ovn.org/zone-name": node1Name, + "k8s.ovn.org/remote-zone-migrated": node1Name, + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + } + labels := map[string]string{ + "k8s.ovn.org/egress-assignable": "", + } + node1 := getNodeObj(node1Name, node1Annotations, labels) + node2Annotations := map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", node2IPv4CIDR, ""), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node2Subnet, networkName1, v4Node2Net1), + "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), + "k8s.ovn.org/zone-name": node2Name, + "k8s.ovn.org/remote-zone-migrated": node2Name, + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + } + node2 := getNodeObj(node2Name, node2Annotations, labels) + twoNodeStatus := []egressipv1.EgressIPStatusItem{ + { + Node: node1Name, + EgressIP: egressIP1, + }, + { + Node: node2Name, + EgressIP: egressIP2, + }, + } + eIP := egressipv1.EgressIP{ + ObjectMeta: newEgressIPMetaWithMark(egressIPName, eIP1Mark), + Spec: egressipv1.EgressIPSpec{ + EgressIPs: []string{egressIP1, egressIP2}, + PodSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + NamespaceSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + }, + Status: egressipv1.EgressIPStatus{ + Items: twoNodeStatus, + }, + } + ginkgo.By("create EgressIP that selects pods in a CDN and UDN") + initialDB := []libovsdbtest.TestData{ + //CDN start + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID"}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1Name + "-UUID", + Name: "k8s-" + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1CDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + Ports: []string{"k8s-" + node1Name + "-UUID"}, + 
}, + // UDN start + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: netInfo.GetNetworkScopedClusterRouterName(), + UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + }, + &nbdb.LogicalRouter{ + UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedGWRouterName(node1.Name), + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + networkName1_ + node1Name + "-UUID", + Name: "k8s-" + networkName1_ + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedSwitchName(node1.Name), + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + }, + } + fakeOvn.startWithDBSetup( + libovsdbtest.TestSetup{ + NBData: initialDB, + }, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, + }, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressCDNNamespace, *egressUDNNamespace}, + }, + &corev1.PodList{ + Items: []corev1.Pod{egressPodCDNLocal, egressPodUDNLocal, egressPodCDNRemote, egressPodUDNRemote}, + }, + &nadv1.NetworkAttachmentDefinitionList{ + Items: []nadv1.NetworkAttachmentDefinition{*nad}, + }, + ) + asf := addressset.NewOvnAddressSetFactory(fakeOvn.nbClient, true, false) + // watch EgressIP depends on UDN enabled svcs address set being available + c := udnenabledsvc.NewController(fakeOvn.nbClient, asf, fakeOvn.controller.watchFactory.ServiceCoreInformer(), []string{}) + go func() { + gomega.Expect(c.Run(ctx.Done())).Should(gomega.Succeed()) + }() + // Add pod IPs to CDN cache + iCDN, nCDN, _ := net.ParseCIDR(podV4IP + "/23") + nCDN.IP = iCDN + fakeOvn.controller.zone = node1.Name + fakeOvn.eIPController.zone = node1.Name + fakeOvn.controller.logicalPortCache.add(&egressPodCDNLocal, "", ovntypes.DefaultNetworkName, "", nil, []*net.IPNet{nCDN}) + secConInfo, ok := fakeOvn.secondaryControllers[networkName1] + gomega.Expect(ok).To(gomega.BeTrue()) + err = fakeOvn.nadController.Start() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // simulate Start() of secondary network controller + err = fakeOvn.eIPController.ensureL3ClusterRouterPoliciesForNetwork(secConInfo.bnc.NetInfo) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.eIPController.ensureL3SwitchPoliciesForNode(secConInfo.bnc.NetInfo, node1Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIPNamespaces() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIPPods() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressNodes() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = 
fakeOvn.controller.WatchEgressIP() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // Add pod IPs to UDN cache + iUDN, nUDN, _ := net.ParseCIDR(v4Pod1IPNode1Net1 + "/23") + nUDN.IP = iUDN + secConInfo.bnc.logicalPortCache.add(&egressPodUDNLocal, "", util.GetNADName(nad.Namespace, nad.Name), "", nil, []*net.IPNet{nUDN}) + _, err = fakeOvn.fakeClient.EgressIPClient.K8sV1().EgressIPs().Create(context.TODO(), &eIP, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASCDNv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, ovntypes.DefaultNetworkName, DefaultNetworkControllerName) + egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4, node2IPv4}) + egressIPServedPodsASUDNv4, _ := buildEgressIPServedPodsAddressSetsForController([]string{v4Pod1IPNode1Net1}, netInfo.GetNetworkName(), DefaultNetworkControllerName) + gomega.Eventually(c.IsAddressSetAvailable).Should(gomega.BeTrue()) + dbIDs := udnenabledsvc.GetAddressSetDBIDs() + udnEnabledSvcV4, _ := addressset.GetTestDbAddrSets(dbIDs, []string{}) + + node1LRP := "k8s-node1" + expectedDatabaseStateTwoEgressNodes := []libovsdbtest.TestData{ + // CDN + getReRouteStaticRoute(v4ClusterSubnet, nodeLogicalRouterIPv4[0]), + getReRoutePolicy(podV4IP, "4", "reroute-UUID", []string{nodeLogicalRouterIPv4[0], v4Node2Tsp}, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPodCDNLocal.Namespace, egressPodCDNLocal.Name, IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs()), + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, v4ClusterSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID"}, + Nat: []string{"egressip-nat-UUID", "egressip-nat2-UUID"}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + Policies: []string{"default-no-reroute-UUID", "no-reroute-service-UUID", + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic", "reroute-UUID"}, + StaticRoutes: []string{"reroute-static-route-UUID"}, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{"100.64.0.2/29"}, + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASCDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: 
nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1Name + "-UUID", + Name: "k8s-" + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1CDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + Ports: []string{"k8s-" + node1Name + "-UUID"}, + QOSRules: []string{"default-QoS-UUID"}, + }, + &nbdb.NAT{ + UUID: "egressip-nat-UUID", + LogicalIP: podV4IP2, + ExternalIP: egressIP1, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPodCDNRemote.Namespace, egressPodCDNRemote.Name, IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &node1LRP, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.NAT{ + UUID: "egressip-nat2-UUID", + LogicalIP: podV4IP, + ExternalIP: egressIP1, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPodCDNLocal.Namespace, egressPodCDNLocal.Name, IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &node1LRP, + Options: map[string]string{ + "stateless": "false", + }, + }, + getNoReRouteReplyTrafficPolicy(ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + getDefaultQoSRule(false, ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + egressSVCServedPodsASv4, + egressIPServedPodsASCDNv4, + egressNodeIPsASv4, + + // UDN + getReRouteStaticRouteForController(v4Net1, nodeLogicalRouterIPv4[0], netInfo.GetNetworkName()), + getReRoutePolicyForController(egressIPName, eipNamespace2, podName2, v4Pod1IPNode1Net1, eIP1Mark, IPFamilyValueV4, []string{nodeLogicalRouterIPv4[0], v4Node2Tsp}, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getGWPktMarkLRPForController(eIP1Mark, egressIPName, eipNamespace2, podName2, v4Pod1IPNode1Net1, IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getGWPktMarkLRPForController(eIP1Mark, egressIPName, eipNamespace2, podName4, v4Pod2IPNode2Net1, IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getNoReRoutePolicyForUDNEnabledSvc(false, netInfo.GetNetworkName(), DefaultNetworkControllerName, egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, udnEnabledSvcV4.Name), + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, v4Net1), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, 
egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: netInfo.GetNetworkScopedClusterRouterName(), + UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + Policies: []string{"udn-default-no-reroute-node-UUID", "udn-default-no-reroute-UUID", "udn-no-reroute-service-UUID", "udn-enabled-svc-no-reroute-UUID", + fmt.Sprintf("%s-no-reroute-reply-traffic", netInfo.GetNetworkName()), + getReRoutePolicyUUID(eipNamespace2, podName2, IPFamilyValueV4, netInfo.GetNetworkName())}, + StaticRoutes: []string{fmt.Sprintf("%s-reroute-static-route-UUID", netInfo.GetNetworkName())}, + }, + &nbdb.LogicalRouter{ + UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedGWRouterName(node1.Name), + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + Policies: []string{getGWPktMarkLRPUUID(eipNamespace2, podName2, IPFamilyValueV4, netInfo.GetNetworkName()), + getGWPktMarkLRPUUID(eipNamespace2, podName4, IPFamilyValueV4, netInfo.GetNetworkName())}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + networkName1_ + node1Name + "-UUID", + Name: "k8s-" + networkName1_ + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedSwitchName(node1.Name), + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + QOSRules: []string{fmt.Sprintf("%s-QoS-UUID", netInfo.GetNetworkName())}, + }, + getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), + getDefaultQoSRule(false, netInfo.GetNetworkName(), DefaultNetworkControllerName), + egressIPServedPodsASUDNv4, + udnEnabledSvcV4, + } + ginkgo.By("ensure expected equals actual") + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseStateTwoEgressNodes)) + ginkgo.By("patch EgressIP status to ensure remote node is egressable only") + oneNodeStatus := []egressipv1.EgressIPStatusItem{ + { + Node: node2Name, + EgressIP: egressIP2, + }, + } + err = patchEgressIP(fakeOvn.controller.kube.PatchEgressIP, eIP.Name, generateEgressIPPatches(eIP1Mark, oneNodeStatus)...) 
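+ // With node2 (remote) as the only egress node, the expected state below drops the CDN SNAT entries and the UDN gateway-router pkt_mark policies on node1, and reroutes point solely at node2's transit switch IP (v4Node2Tsp).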
+ gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Eventually(getEgressIPStatusLen(eIP.Name)).Should(gomega.Equal(1)) + expectedDatabaseStateOneEgressNode := []libovsdbtest.TestData{ + // CDN + getReRouteStaticRoute(v4ClusterSubnet, nodeLogicalRouterIPv4[0]), + getReRoutePolicy(podV4IP, "4", "reroute-UUID", []string{v4Node2Tsp}, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPodCDNLocal.Namespace, egressPodCDNLocal.Name, IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs()), + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, v4ClusterSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID"}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + Policies: []string{"default-no-reroute-UUID", "no-reroute-service-UUID", + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic", "reroute-UUID"}, + StaticRoutes: []string{"reroute-static-route-UUID"}, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{"100.64.0.2/29"}, + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASCDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1Name + "-UUID", + Name: "k8s-" + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1CDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + Ports: []string{"k8s-" + node1Name + "-UUID"}, + QOSRules: []string{"default-QoS-UUID"}, + }, + getNoReRouteReplyTrafficPolicy(ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + getDefaultQoSRule(false, ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + egressSVCServedPodsASv4, + egressIPServedPodsASCDNv4, + egressNodeIPsASv4, + + // UDN + getReRouteStaticRouteForController(v4Net1, nodeLogicalRouterIPv4[0], netInfo.GetNetworkName()), + getReRoutePolicyForController(egressIPName, eipNamespace2, podName2, v4Pod1IPNode1Net1, eIP1Mark, 
IPFamilyValueV4, []string{v4Node2Tsp}, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getNoReRoutePolicyForUDNEnabledSvc(false, netInfo.GetNetworkName(), DefaultNetworkControllerName, egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, udnEnabledSvcV4.Name), + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, v4Net1), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: netInfo.GetNetworkScopedClusterRouterName(), + UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + Policies: []string{"udn-default-no-reroute-node-UUID", "udn-default-no-reroute-UUID", "udn-no-reroute-service-UUID", "udn-enabled-svc-no-reroute-UUID", + fmt.Sprintf("%s-no-reroute-reply-traffic", netInfo.GetNetworkName()), + getReRoutePolicyUUID(eipNamespace2, podName2, IPFamilyValueV4, netInfo.GetNetworkName())}, + StaticRoutes: []string{fmt.Sprintf("%s-reroute-static-route-UUID", netInfo.GetNetworkName())}, + }, + &nbdb.LogicalRouter{ + UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedGWRouterName(node1.Name), + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + networkName1_ + node1Name + "-UUID", + Name: "k8s-" + networkName1_ + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedSwitchName(node1.Name), + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: 
netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + QOSRules: []string{fmt.Sprintf("%s-QoS-UUID", netInfo.GetNetworkName())}, + }, + getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), + getDefaultQoSRule(false, netInfo.GetNetworkName(), DefaultNetworkControllerName), + egressIPServedPodsASUDNv4, + udnEnabledSvcV4, + } + ginkgo.By("ensure expected equals actual") + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseStateOneEgressNode)) + + ginkgo.By("restore both nodes as egressable") + err = patchEgressIP(fakeOvn.controller.kube.PatchEgressIP, eIP.Name, generateEgressIPPatches(eIP1Mark, twoNodeStatus)...) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Eventually(getEgressIPStatusLen(eIP.Name)).Should(gomega.Equal(2)) + ginkgo.By("ensure expected equals actual") + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseStateTwoEgressNodes)) + return nil + } + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + }) + + ginkgo.Context("EgressIP delete", func() { + ginkgo.It("should delete UDN and CDN config", func() { + // Test steps: + // One EIP selecting a pod on a UDN and another pod on a CDN + // EIP egresses locally and remotely + // Delete EIP + app.Action = func(ctx *cli.Context) error { + // Node 1 is local, Node 2 is remote + egressIP1 := "192.168.126.101" + egressIP2 := "192.168.126.102" + node1IPv4 := "192.168.126.202" + node1IPv4CIDR := node1IPv4 + "/24" + node2IPv4 := "192.168.126.51" + node2IPv4CIDR := node2IPv4 + "/24" + _, node1CDNSubnet, _ := net.ParseCIDR(v4Node1Subnet) + _, node1UDNSubnet, _ := net.ParseCIDR(v4Node1Net1) + nadName := util.GetNADName(eipNamespace2, nadName1) + egressCDNNamespace := newNamespaceWithLabels(eipNamespace, egressPodLabel) + egressUDNNamespace := newNamespaceWithLabels(eipNamespace2, egressPodLabel) + egressPodCDNLocal := *newPodWithLabels(eipNamespace, podName, node1Name, podV4IP, egressPodLabel) + egressPodUDNLocal := *newPodWithLabels(eipNamespace2, podName2, node1Name, v4Pod1IPNode1Net1, egressPodLabel) + egressPodCDNRemote := *newPodWithLabels(eipNamespace, podName3, node2Name, podV4IP2, egressPodLabel) + setPrimaryNetworkAnnot(&egressPodCDNRemote, ovntypes.DefaultNetworkName, fmt.Sprintf("%s%s", podV4IP2, util.GetIPFullMaskString(podV4IP2))) + egressPodUDNRemote := *newPodWithLabels(eipNamespace2, podName4, node2Name, v4Pod2IPNode2Net1, egressPodLabel) + setPrimaryNetworkAnnot(&egressPodUDNRemote, nadName, fmt.Sprintf("%s%s", v4Pod2IPNode2Net1, util.GetIPFullMaskString(v4Pod2IPNode2Net1))) + netconf := ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{ + Name: networkName1, + Type: "ovn-k8s-cni-overlay", + }, + Role: ovntypes.NetworkRolePrimary, + Topology: ovntypes.Layer3Topology, + NADName: nadName, + Subnets: v4Net1, + } + nad, err := newNetworkAttachmentDefinition( + eipNamespace2, + nadName1, + netconf, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + netInfo, err := util.NewNetInfo(&netconf) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + node1Annotations := map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", node1IPv4CIDR, ""), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node1Subnet, networkName1, v4Node1Net1), + "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), + "k8s.ovn.org/zone-name": node1Name,
"k8s.ovn.org/remote-zone-migrated": node1Name, + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + } + labels := map[string]string{ + "k8s.ovn.org/egress-assignable": "", + } + node1 := getNodeObj(node1Name, node1Annotations, labels) + node2Annotations := map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", node2IPv4CIDR, ""), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node2Subnet, networkName1, v4Node2Net1), + "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), + "k8s.ovn.org/zone-name": node2Name, + "k8s.ovn.org/remote-zone-migrated": node2Name, + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + } + node2 := getNodeObj(node2Name, node2Annotations, labels) + twoNodeStatus := []egressipv1.EgressIPStatusItem{ + { + Node: node1Name, + EgressIP: egressIP1, + }, + { + Node: node2Name, + EgressIP: egressIP2, + }, + } + eIP := egressipv1.EgressIP{ + ObjectMeta: newEgressIPMetaWithMark(egressIPName, eIP1Mark), + Spec: egressipv1.EgressIPSpec{ + EgressIPs: []string{egressIP1, egressIP2}, + PodSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + NamespaceSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + }, + Status: egressipv1.EgressIPStatus{ + Items: twoNodeStatus, + }, + } + ginkgo.By("create EgressIP that selects pods in a CDN and UDN") + initialDB := []libovsdbtest.TestData{ + //CDN start + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID"}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1Name + "-UUID", + Name: "k8s-" + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1CDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + Ports: []string{"k8s-" + node1Name + "-UUID"}, + }, + // UDN start + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: netInfo.GetNetworkScopedClusterRouterName(), + UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + }, + &nbdb.LogicalRouter{ + UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedGWRouterName(node1.Name), + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + networkName1_ + node1Name + "-UUID", + Name: "k8s-" + networkName1_ + node1Name, + 
Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedSwitchName(node1.Name), + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + }, + } + fakeOvn.startWithDBSetup( + libovsdbtest.TestSetup{ + NBData: initialDB, + }, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, + }, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressCDNNamespace, *egressUDNNamespace}, + }, + &corev1.PodList{ + Items: []corev1.Pod{egressPodCDNLocal, egressPodUDNLocal, egressPodCDNRemote, egressPodUDNRemote}, + }, + &nadv1.NetworkAttachmentDefinitionList{ + Items: []nadv1.NetworkAttachmentDefinition{*nad}, + }, + ) + asf := addressset.NewOvnAddressSetFactory(fakeOvn.nbClient, true, false) + // watch EgressIP depends on UDN enabled svcs address set being available + c := udnenabledsvc.NewController(fakeOvn.nbClient, asf, fakeOvn.controller.watchFactory.ServiceCoreInformer(), []string{}) + go func() { + gomega.Expect(c.Run(ctx.Done())).Should(gomega.Succeed()) + }() + // Add pod IPs to CDN cache + iCDN, nCDN, _ := net.ParseCIDR(podV4IP + "/23") + nCDN.IP = iCDN + fakeOvn.controller.logicalPortCache.add(&egressPodCDNLocal, "", ovntypes.DefaultNetworkName, "", nil, []*net.IPNet{nCDN}) + fakeOvn.controller.zone = node1.Name + fakeOvn.eIPController.zone = node1.Name + err = fakeOvn.nadController.Start() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIPNamespaces() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIPPods() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressNodes() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIP() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + secConInfo, ok := fakeOvn.secondaryControllers[networkName1] + gomega.Expect(ok).To(gomega.BeTrue()) + // Add pod IPs to UDN cache + iUDN, nUDN, _ := net.ParseCIDR(v4Pod1IPNode1Net1 + "/23") + nUDN.IP = iUDN + secConInfo.bnc.logicalPortCache.add(&egressPodUDNLocal, "", util.GetNADName(nad.Namespace, nad.Name), "", nil, []*net.IPNet{nUDN}) + _, err = fakeOvn.fakeClient.EgressIPClient.K8sV1().EgressIPs().Create(context.TODO(), &eIP, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASCDNv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, ovntypes.DefaultNetworkName, DefaultNetworkControllerName) + egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4, node2IPv4}) + egressIPServedPodsASUDNv4, _ := buildEgressIPServedPodsAddressSetsForController([]string{v4Pod1IPNode1Net1}, netInfo.GetNetworkName(), DefaultNetworkControllerName) + gomega.Eventually(c.IsAddressSetAvailable).Should(gomega.BeTrue()) + dbIDs := udnenabledsvc.GetAddressSetDBIDs() + udnEnabledSvcV4, _ := addressset.GetTestDbAddrSets(dbIDs, []string{}) + node1LRP := "k8s-node1" + expectedDatabaseStateTwoEgressNodes := []libovsdbtest.TestData{ + // CDN + getReRouteStaticRoute(v4ClusterSubnet, nodeLogicalRouterIPv4[0]), + getReRoutePolicy(podV4IP, "4", "reroute-UUID", []string{nodeLogicalRouterIPv4[0], v4Node2Tsp}, + getEgressIPLRPReRouteDbIDs(eIP.Name, 
egressPodCDNLocal.Namespace, egressPodCDNLocal.Name, IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs()), + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, v4ClusterSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID"}, + Nat: []string{"egressip-nat-UUID", "egressip-nat2-UUID"}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + Policies: []string{"default-no-reroute-UUID", "no-reroute-service-UUID", + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic", "reroute-UUID"}, + StaticRoutes: []string{"reroute-static-route-UUID"}, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{"100.64.0.2/29"}, + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASCDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1Name + "-UUID", + Name: "k8s-" + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1CDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + Ports: []string{"k8s-" + node1Name + "-UUID"}, + QOSRules: []string{"default-QoS-UUID"}, + }, + &nbdb.NAT{ + UUID: "egressip-nat-UUID", + LogicalIP: podV4IP2, + ExternalIP: egressIP1, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPodCDNRemote.Namespace, egressPodCDNRemote.Name, IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &node1LRP, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.NAT{ + UUID: "egressip-nat2-UUID", + LogicalIP: podV4IP, + ExternalIP: egressIP1, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPodCDNLocal.Namespace, egressPodCDNLocal.Name, IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &node1LRP, + Options: map[string]string{ + "stateless": "false", + }, + }, + 
getNoReRouteReplyTrafficPolicy(ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + getDefaultQoSRule(false, ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + egressSVCServedPodsASv4, + egressIPServedPodsASCDNv4, + egressNodeIPsASv4, + + // UDN + getReRouteStaticRouteForController(v4Net1, nodeLogicalRouterIPv4[0], netInfo.GetNetworkName()), + getReRoutePolicyForController(egressIPName, eipNamespace2, podName2, v4Pod1IPNode1Net1, eIP1Mark, IPFamilyValueV4, []string{nodeLogicalRouterIPv4[0], v4Node2Tsp}, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getGWPktMarkLRPForController(eIP1Mark, egressIPName, eipNamespace2, podName2, v4Pod1IPNode1Net1, IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getGWPktMarkLRPForController(eIP1Mark, egressIPName, eipNamespace2, podName4, v4Pod2IPNode2Net1, IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getNoReRoutePolicyForUDNEnabledSvc(false, netInfo.GetNetworkName(), DefaultNetworkControllerName, egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, udnEnabledSvcV4.Name), + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, v4Net1), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: netInfo.GetNetworkScopedClusterRouterName(), + UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + Policies: []string{"udn-default-no-reroute-node-UUID", "udn-default-no-reroute-UUID", "udn-no-reroute-service-UUID", "udn-enabled-svc-no-reroute-UUID", + fmt.Sprintf("%s-no-reroute-reply-traffic", netInfo.GetNetworkName()), + getReRoutePolicyUUID(eipNamespace2, podName2, IPFamilyValueV4, netInfo.GetNetworkName())}, + StaticRoutes: []string{fmt.Sprintf("%s-reroute-static-route-UUID", netInfo.GetNetworkName())}, + }, + &nbdb.LogicalRouter{ + UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", + 
Name: netInfo.GetNetworkScopedGWRouterName(node1.Name), + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + Policies: []string{getGWPktMarkLRPUUID(eipNamespace2, podName2, IPFamilyValueV4, netInfo.GetNetworkName()), + getGWPktMarkLRPUUID(eipNamespace2, podName4, IPFamilyValueV4, netInfo.GetNetworkName())}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + networkName1_ + node1Name + "-UUID", + Name: "k8s-" + networkName1_ + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedSwitchName(node1.Name), + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + QOSRules: []string{fmt.Sprintf("%s-QoS-UUID", netInfo.GetNetworkName())}, + }, + getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), + getDefaultQoSRule(false, netInfo.GetNetworkName(), DefaultNetworkControllerName), + egressIPServedPodsASUDNv4, + udnEnabledSvcV4, + } + ginkgo.By("ensure expected equals actual") + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseStateTwoEgressNodes)) + ginkgo.By("delete EgressIP") + err = fakeOvn.fakeClient.EgressIPClient.K8sV1().EgressIPs().Delete(context.TODO(), eIP.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + egressIPServedPodsASCDNv4.Addresses = nil + egressIPServedPodsASUDNv4.Addresses = nil + expectedDatabaseState := []libovsdbtest.TestData{ + // CDN + getReRouteStaticRoute(v4ClusterSubnet, nodeLogicalRouterIPv4[0]), + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, v4ClusterSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID"}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + Policies: []string{"default-no-reroute-UUID", "no-reroute-service-UUID", + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, + StaticRoutes: []string{"reroute-static-route-UUID"}, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + 
node1.Name, + Networks: []string{"100.64.0.2/29"}, + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASCDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1Name + "-UUID", + Name: "k8s-" + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1CDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + Ports: []string{"k8s-" + node1Name + "-UUID"}, + QOSRules: []string{"default-QoS-UUID"}, + }, + getNoReRouteReplyTrafficPolicy(ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + getDefaultQoSRule(false, ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + egressSVCServedPodsASv4, + egressIPServedPodsASCDNv4, + egressNodeIPsASv4, + + // UDN + getReRouteStaticRouteForController(v4Net1, nodeLogicalRouterIPv4[0], netInfo.GetNetworkName()), + getNoReRoutePolicyForUDNEnabledSvc(false, netInfo.GetNetworkName(), DefaultNetworkControllerName, egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, udnEnabledSvcV4.Name), + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, v4Net1), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: netInfo.GetNetworkScopedClusterRouterName(), + UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: secConInfo.bnc.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + Policies: []string{"udn-default-no-reroute-node-UUID", "udn-default-no-reroute-UUID", 
"udn-no-reroute-service-UUID", "udn-enabled-svc-no-reroute-UUID", + fmt.Sprintf("%s-no-reroute-reply-traffic", netInfo.GetNetworkName()), + }, + StaticRoutes: []string{fmt.Sprintf("%s-reroute-static-route-UUID", netInfo.GetNetworkName())}, + }, + &nbdb.LogicalRouter{ + UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedGWRouterName(node1.Name), + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: secConInfo.bnc.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + networkName1_ + node1Name + "-UUID", + Name: "k8s-" + networkName1_ + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedSwitchName(node1.Name), + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + QOSRules: []string{fmt.Sprintf("%s-QoS-UUID", netInfo.GetNetworkName())}, + }, + getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), + getDefaultQoSRule(false, netInfo.GetNetworkName(), DefaultNetworkControllerName), + egressIPServedPodsASUDNv4, + udnEnabledSvcV4, + } + ginkgo.By("ensure expected equals actual") + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) + return nil + } + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + }) + + ginkgo.Context("Namespace update", func() { + ginkgo.It("should update UDN and CDN config", func() { + // Test steps: + // create an EIP not selecting a pod on an UDN and another pod on a CDN because namespace labels aren't selected + // EIP egresses locally and remote + // Update namespace to match EIP selectors + app.Action = func(ctx *cli.Context) error { + // Node 1 is local, Node 2 is remote + egressIP1 := "192.168.126.101" + egressIP2 := "192.168.126.102" + node1IPv4 := "192.168.126.202" + node1IPv4CIDR := node1IPv4 + "/24" + node2IPv4 := "192.168.126.51" + node2IPv4CIDR := node2IPv4 + "/24" + _, node1CDNSubnet, _ := net.ParseCIDR(v4Node1Subnet) + _, node1UDNSubnet, _ := net.ParseCIDR(v4Node1Net1) + egressCDNNamespace := newNamespaceWithLabels(eipNamespace, nil) + egressUDNNamespace := newNamespaceWithLabels(eipNamespace2, nil) + egressPodCDN := *newPodWithLabels(eipNamespace, podName, node1Name, podV4IP, egressPodLabel) + egressPodUDN := *newPodWithLabels(eipNamespace2, podName2, node1Name, podV4IP2, egressPodLabel) + + nadNsName := util.GetNADName(eipNamespace2, nadName1) + netconf := ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{ + Name: networkName1, + Type: "ovn-k8s-cni-overlay", + }, + Role: ovntypes.NetworkRolePrimary, + Topology: ovntypes.Layer3Topology, + NADName: nadNsName, + Subnets: v4Net1, + } + nad, err := newNetworkAttachmentDefinition( + eipNamespace2, + nadName1, + netconf, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + netInfo, err := util.NewNetInfo(&netconf) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + node1Annotations := map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", node1IPv4CIDR, ""), + 
"k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node1Subnet, networkName1, v4Node1Net1), + "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), + "k8s.ovn.org/zone-name": node1Name, + "k8s.ovn.org/remote-zone-migrated": node1Name, + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + } + labels := map[string]string{ + "k8s.ovn.org/egress-assignable": "", + } + node1 := getNodeObj(node1Name, node1Annotations, labels) + node2Annotations := map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", node2IPv4CIDR, ""), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node2Subnet, networkName1, v4Node2Net1), + "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), + "k8s.ovn.org/zone-name": node2Name, + "k8s.ovn.org/remote-zone-migrated": node2Name, + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + } + node2 := getNodeObj(node2Name, node2Annotations, labels) + twoNodeStatus := []egressipv1.EgressIPStatusItem{ + { + Node: node1Name, + EgressIP: egressIP1, + }, + { + Node: node2Name, + EgressIP: egressIP2, + }, + } + eIP := egressipv1.EgressIP{ + ObjectMeta: newEgressIPMetaWithMark(egressIPName, eIP1Mark), + Spec: egressipv1.EgressIPSpec{ + EgressIPs: []string{egressIP1, egressIP2}, + PodSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + NamespaceSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + }, + Status: egressipv1.EgressIPStatus{ + Items: twoNodeStatus, + }, + } + ginkgo.By("create EgressIP that doesnt select pods in a CDN and UDN") + initialDB := []libovsdbtest.TestData{ + //CDN start + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID"}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1Name + "-UUID", + Name: "k8s-" + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1CDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + Ports: []string{"k8s-" + node1Name + "-UUID"}, + }, + // UDN start + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: netInfo.GetNetworkScopedClusterRouterName(), + UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + }, + &nbdb.LogicalRouter{ + UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedGWRouterName(node1.Name), + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID"}, 
+ ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + networkName1_ + node1Name + "-UUID", + Name: "k8s-" + networkName1_ + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedSwitchName(node1.Name), + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + }, + } + fakeOvn.startWithDBSetup( + libovsdbtest.TestSetup{ + NBData: initialDB, + }, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, + }, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressCDNNamespace, *egressUDNNamespace}, + }, + &corev1.PodList{ + Items: []corev1.Pod{egressPodCDN, egressPodUDN}, + }, + &nadv1.NetworkAttachmentDefinitionList{ + Items: []nadv1.NetworkAttachmentDefinition{*nad}, + }, + &egressipv1.EgressIPList{ + Items: []egressipv1.EgressIP{eIP}, + }, + ) + asf := addressset.NewOvnAddressSetFactory(fakeOvn.nbClient, true, false) + // watch EgressIP depends on UDN enabled svcs address set being available + c := udnenabledsvc.NewController(fakeOvn.nbClient, asf, fakeOvn.controller.watchFactory.ServiceCoreInformer(), []string{}) + go func() { + gomega.Expect(c.Run(ctx.Done())).Should(gomega.Succeed()) + }() + // Add pod IPs to CDN cache + iCDN, nCDN, _ := net.ParseCIDR(podV4IP + "/23") + nCDN.IP = iCDN + fakeOvn.controller.logicalPortCache.add(&egressPodCDN, "", ovntypes.DefaultNetworkName, "", nil, []*net.IPNet{nCDN}) + fakeOvn.controller.zone = node1Name + fakeOvn.controller.eIPC.zone = node1Name + err = fakeOvn.nadController.Start() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.eIPController.ensureL3ClusterRouterPoliciesForNetwork(netInfo) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.eIPController.ensureL3SwitchPoliciesForNode(netInfo, node1Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIPNamespaces() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIPPods() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressNodes() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIP() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + secConInfo, ok := fakeOvn.secondaryControllers[networkName1] + gomega.Expect(ok).To(gomega.BeTrue()) + // Add pod IPs to UDN cache + iUDN, nUDN, _ := net.ParseCIDR(v4Pod1IPNode1Net1 + "/23") + nUDN.IP = iUDN + secConInfo.bnc.logicalPortCache.add(&egressPodUDN, "", util.GetNADName(nad.Namespace, nad.Name), "", nil, []*net.IPNet{nUDN}) + ginkgo.By("update namespaces with labels so they're now selected by EgressIP") + egressCDNNamespace.Labels = egressPodLabel + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Namespaces().Update(context.Background(), egressCDNNamespace, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + egressUDNNamespace.Labels = egressPodLabel + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Namespaces().Update(context.Background(), egressUDNNamespace, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) +
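+ // The verification pattern used below (and throughout this file): build the complete expected NB DB contents as a []libovsdbtest.TestData and poll the fake NB client until it converges, i.e. + // gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expected)) + // Eventually re-reads the DB on every poll, so the order in which the controller applies its writes does not matter; only the converged state does.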
egressIPServedPodsASCDNv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, ovntypes.DefaultNetworkName, DefaultNetworkControllerName) + egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4, node2IPv4}) + egressIPServedPodsASUDNv4, _ := buildEgressIPServedPodsAddressSetsForController([]string{v4Pod1IPNode1Net1}, netInfo.GetNetworkName(), DefaultNetworkControllerName) + gomega.Eventually(c.IsAddressSetAvailable).Should(gomega.BeTrue()) + dbIDs := udnenabledsvc.GetAddressSetDBIDs() + udnEnabledSvcV4, _ := addressset.GetTestDbAddrSets(dbIDs, []string{}) + node1LRP := "k8s-node1" + expectedDatabaseStateTwoEgressNodes := []libovsdbtest.TestData{ + // CDN + getReRouteStaticRoute(v4ClusterSubnet, nodeLogicalRouterIPv4[0]), + getReRoutePolicy(podV4IP, "4", "reroute-UUID", []string{nodeLogicalRouterIPv4[0], v4Node2Tsp}, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPodCDN.Namespace, egressPodCDN.Name, IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs()), + &nbdb.NAT{ + UUID: "egressip-nat-UUID", + LogicalIP: podV4IP, + ExternalIP: egressIP1, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPodCDN.Namespace, egressPodCDN.Name, IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &node1LRP, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, v4ClusterSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID"}, + Nat: []string{"egressip-nat-UUID"}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + Policies: []string{"default-no-reroute-UUID", "no-reroute-service-UUID", + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic", "reroute-UUID"}, + StaticRoutes: []string{"reroute-static-route-UUID"}, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{"100.64.0.2/29"}, + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASCDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, 
ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1Name + "-UUID", + Name: "k8s-" + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1CDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + Ports: []string{"k8s-" + node1Name + "-UUID"}, + QOSRules: []string{"default-QoS-UUID"}, + }, + getNoReRouteReplyTrafficPolicy(ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + getDefaultQoSRule(false, ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + egressSVCServedPodsASv4, + egressIPServedPodsASCDNv4, + egressNodeIPsASv4, + + // UDN + getReRouteStaticRouteForController(v4Net1, nodeLogicalRouterIPv4[0], netInfo.GetNetworkName()), + getReRoutePolicyForController(egressIPName, eipNamespace2, podName2, v4Pod1IPNode1Net1, eIP1Mark, IPFamilyValueV4, []string{nodeLogicalRouterIPv4[0], v4Node2Tsp}, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getGWPktMarkLRPForController(eIP1Mark, egressIPName, eipNamespace2, podName2, v4Pod1IPNode1Net1, IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getNoReRoutePolicyForUDNEnabledSvc(false, netInfo.GetNetworkName(), DefaultNetworkControllerName, egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, udnEnabledSvcV4.Name), + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, v4Net1), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: netInfo.GetNetworkScopedClusterRouterName(), + UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + Policies: []string{"udn-default-no-reroute-node-UUID", "udn-default-no-reroute-UUID", "udn-no-reroute-service-UUID", "udn-enabled-svc-no-reroute-UUID", + fmt.Sprintf("%s-no-reroute-reply-traffic", netInfo.GetNetworkName()), + 
getReRoutePolicyUUID(eipNamespace2, podName2, IPFamilyValueV4, netInfo.GetNetworkName())}, + StaticRoutes: []string{fmt.Sprintf("%s-reroute-static-route-UUID", netInfo.GetNetworkName())}, + }, + &nbdb.LogicalRouter{ + UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedGWRouterName(node1.Name), + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + Policies: []string{getGWPktMarkLRPUUID(eipNamespace2, podName2, IPFamilyValueV4, netInfo.GetNetworkName())}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + networkName1_ + node1Name + "-UUID", + Name: "k8s-" + networkName1_ + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedSwitchName(node1.Name), + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + QOSRules: []string{fmt.Sprintf("%s-QoS-UUID", netInfo.GetNetworkName())}, + }, + getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), + getDefaultQoSRule(false, netInfo.GetNetworkName(), DefaultNetworkControllerName), + egressIPServedPodsASUDNv4, + udnEnabledSvcV4, + } + ginkgo.By("ensure expected equals actual") + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseStateTwoEgressNodes)) + return nil + } + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + }) + + ginkgo.Context("Namespace delete", func() { + ginkgo.It("should delete UDN and CDN config", func() { + // Test steps: + // create an EIP selecting a pod on a UDN and another pod on a CDN + // EIP egresses locally and remotely + // Delete namespace + app.Action = func(ctx *cli.Context) error { + // Node 1 is local, Node 2 is remote + egressIP1 := "192.168.126.101" + egressIP2 := "192.168.126.102" + node1IPv4 := "192.168.126.202" + node1IPv4CIDR := node1IPv4 + "/24" + node2IPv4 := "192.168.126.51" + node2IPv4CIDR := node2IPv4 + "/24" + _, node1CDNSubnet, _ := net.ParseCIDR(v4Node1Subnet) + _, node1UDNSubnet, _ := net.ParseCIDR(v4Node1Net1) + egressCDNNamespace := newNamespaceWithLabels(eipNamespace, egressPodLabel) + egressUDNNamespace := newNamespaceWithLabels(eipNamespace2, egressPodLabel) + egressPodCDN := *newPodWithLabels(eipNamespace, podName, node1Name, podV4IP, egressPodLabel) + egressPodUDN := *newPodWithLabels(eipNamespace2, podName2, node1Name, podV4IP2, egressPodLabel) + + nadNsName := util.GetNADName(eipNamespace2, nadName1) + netconf := ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{ + Name: networkName1, + Type: "ovn-k8s-cni-overlay", + }, + Role: ovntypes.NetworkRolePrimary, + Topology: ovntypes.Layer3Topology, + NADName: nadNsName, + Subnets: v4Net1, + } + nad, err := newNetworkAttachmentDefinition( + eipNamespace2, + nadName1, + netconf, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + netInfo, err := util.NewNetInfo(&netconf) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + node1Annotations := map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}",
node1IPv4CIDR, ""), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node1Subnet, networkName1, v4Node1Net1), + "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), + "k8s.ovn.org/zone-name": node1Name, + "k8s.ovn.org/remote-zone-migrated": node1Name, + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + } + labels := map[string]string{ + "k8s.ovn.org/egress-assignable": "", + } + node1 := getNodeObj(node1Name, node1Annotations, labels) + node2Annotations := map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", node2IPv4CIDR, ""), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node2Subnet, networkName1, v4Node2Net1), + "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), + "k8s.ovn.org/zone-name": node2Name, + "k8s.ovn.org/remote-zone-migrated": node2Name, + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + } + node2 := getNodeObj(node2Name, node2Annotations, labels) + twoNodeStatus := []egressipv1.EgressIPStatusItem{ + { + Node: node1Name, + EgressIP: egressIP1, + }, + { + Node: node2Name, + EgressIP: egressIP2, + }, + } + eIP := egressipv1.EgressIP{ + ObjectMeta: newEgressIPMetaWithMark(egressIPName, eIP1Mark), + Spec: egressipv1.EgressIPSpec{ + EgressIPs: []string{egressIP1, egressIP2}, + PodSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + NamespaceSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + }, + Status: egressipv1.EgressIPStatus{ + Items: twoNodeStatus, + }, + } + ginkgo.By("create EgressIP that selects pods in a CDN and UDN") + initialDB := []libovsdbtest.TestData{ + //CDN start + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID"}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1Name + "-UUID", + Name: "k8s-" + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1CDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + Ports: []string{"k8s-" + node1Name + "-UUID"}, + }, + // UDN start + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: netInfo.GetNetworkScopedClusterRouterName(), + UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + }, + &nbdb.LogicalRouter{ + UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedGWRouterName(node1.Name), + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + 
node1.Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + networkName1_ + node1Name + "-UUID", + Name: "k8s-" + networkName1_ + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedSwitchName(node1.Name), + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + }, + } + fakeOvn.startWithDBSetup( + libovsdbtest.TestSetup{ + NBData: initialDB, + }, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, + }, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressCDNNamespace, *egressUDNNamespace}, + }, + &corev1.PodList{ + Items: []corev1.Pod{egressPodCDN, egressPodUDN}, + }, + &nadv1.NetworkAttachmentDefinitionList{ + Items: []nadv1.NetworkAttachmentDefinition{*nad}, + }, + &egressipv1.EgressIPList{ + Items: []egressipv1.EgressIP{eIP}, + }, + ) + asf := addressset.NewOvnAddressSetFactory(fakeOvn.nbClient, true, false) + // watch EgressIP depends on UDN enabled svcs address set being available + c := udnenabledsvc.NewController(fakeOvn.nbClient, asf, fakeOvn.controller.watchFactory.ServiceCoreInformer(), []string{}) + go func() { + gomega.Expect(c.Run(ctx.Done())).Should(gomega.Succeed()) + }() + // Add pod IPs to CDN cache + iCDN, nCDN, _ := net.ParseCIDR(podV4IP + "/23") + nCDN.IP = iCDN + fakeOvn.controller.logicalPortCache.add(&egressPodCDN, "", ovntypes.DefaultNetworkName, "", nil, []*net.IPNet{nCDN}) + fakeOvn.controller.zone = node1Name + fakeOvn.eIPController.zone = node1Name + fakeOvn.controller.eIPC.nodeZoneState.Store(node1Name, true) + fakeOvn.controller.eIPC.nodeZoneState.Store(node2Name, false) + // Add pod IPs to UDN cache + iUDN, nUDN, _ := net.ParseCIDR(v4Pod1IPNode1Net1 + "/23") + nUDN.IP = iUDN + fakeOvn.controller.logicalPortCache.add(&egressPodUDN, "", util.GetNADName(nad.Namespace, nad.Name), "", nil, []*net.IPNet{nUDN}) + err = fakeOvn.nadController.Start() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.eIPController.ensureL3ClusterRouterPoliciesForNetwork(netInfo) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.eIPController.ensureL3SwitchPoliciesForNode(netInfo, node1Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIPNamespaces() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIPPods() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressNodes() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIP() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASCDNv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, ovntypes.DefaultNetworkName, DefaultNetworkControllerName) + egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4, node2IPv4}) + egressIPServedPodsASUDNv4, _ := buildEgressIPServedPodsAddressSetsForController([]string{v4Pod1IPNode1Net1}, netInfo.GetNetworkName(), DefaultNetworkControllerName) + gomega.Eventually(c.IsAddressSetAvailable).Should(gomega.BeTrue()) + dbIDs := 
udnenabledsvc.GetAddressSetDBIDs() + udnEnabledSvcV4, _ := addressset.GetTestDbAddrSets(dbIDs, []string{}) + node1LRP := "k8s-node1" + expectedDatabaseStateTwoEgressNodes := []libovsdbtest.TestData{ + // CDN + getReRouteStaticRoute(v4ClusterSubnet, nodeLogicalRouterIPv4[0]), + getReRoutePolicy(podV4IP, "4", "reroute-UUID", []string{nodeLogicalRouterIPv4[0], v4Node2Tsp}, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPodCDN.Namespace, egressPodCDN.Name, IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs()), + &nbdb.NAT{ + UUID: "egressip-nat-UUID", + LogicalIP: podV4IP, + ExternalIP: egressIP1, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPodCDN.Namespace, egressPodCDN.Name, IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &node1LRP, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, v4ClusterSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID"}, + Nat: []string{"egressip-nat-UUID"}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + Policies: []string{"default-no-reroute-UUID", "no-reroute-service-UUID", + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic", "reroute-UUID"}, + StaticRoutes: []string{"reroute-static-route-UUID"}, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{"100.64.0.2/29"}, + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASCDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1Name + "-UUID", + Name: "k8s-" + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1CDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + Ports: []string{"k8s-" + node1Name + "-UUID"}, + QOSRules: []string{"default-QoS-UUID"}, + }, + 
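+ // Editor's note: on the CDN side the egress datapath is the nbdb.NAT row above (pod IP SNAT'ed to the EgressIP on the gateway router). The UDN rows that follow instead mark packets with the EgressIP mark (eIP1Mark) via getGWPktMarkLRPForController and leave the rewrite to the mark-aware gateway, which is why no per-pod nbdb.NAT appears in the UDN section. The mark policy is roughly (see the helper definitions at the end of this file): + // Match: "ip4.src == <podIP> && pkt.mark == 0", Options: {"pkt_mark": "<mark>"}, Action: allow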
getNoReRouteReplyTrafficPolicy(ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + getDefaultQoSRule(false, ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + egressSVCServedPodsASv4, + egressIPServedPodsASCDNv4, + egressNodeIPsASv4, + + // UDN + getReRouteStaticRouteForController(v4Net1, nodeLogicalRouterIPv4[0], netInfo.GetNetworkName()), + getReRoutePolicyForController(egressIPName, eipNamespace2, podName2, v4Pod1IPNode1Net1, eIP1Mark, IPFamilyValueV4, []string{nodeLogicalRouterIPv4[0], v4Node2Tsp}, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getGWPktMarkLRPForController(eIP1Mark, egressIPName, eipNamespace2, podName2, v4Pod1IPNode1Net1, IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getNoReRoutePolicyForUDNEnabledSvc(false, netInfo.GetNetworkName(), DefaultNetworkControllerName, egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, udnEnabledSvcV4.Name), + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, v4Net1), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: netInfo.GetNetworkScopedClusterRouterName(), + UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + Policies: []string{"udn-default-no-reroute-node-UUID", "udn-default-no-reroute-UUID", "udn-no-reroute-service-UUID", "udn-enabled-svc-no-reroute-UUID", + fmt.Sprintf("%s-no-reroute-reply-traffic", netInfo.GetNetworkName()), + getReRoutePolicyUUID(eipNamespace2, podName2, IPFamilyValueV4, netInfo.GetNetworkName())}, + StaticRoutes: []string{fmt.Sprintf("%s-reroute-static-route-UUID", netInfo.GetNetworkName())}, + }, + &nbdb.LogicalRouter{ + UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedGWRouterName(node1.Name), + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + 
"-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + Policies: []string{getGWPktMarkLRPUUID(eipNamespace2, podName2, IPFamilyValueV4, netInfo.GetNetworkName())}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + networkName1_ + node1Name + "-UUID", + Name: "k8s-" + networkName1_ + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedSwitchName(node1.Name), + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + QOSRules: []string{fmt.Sprintf("%s-QoS-UUID", netInfo.GetNetworkName())}, + }, + getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), + getDefaultQoSRule(false, netInfo.GetNetworkName(), DefaultNetworkControllerName), + egressIPServedPodsASUDNv4, + udnEnabledSvcV4, + } + ginkgo.By("ensure expected equals actual") + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseStateTwoEgressNodes)) + return nil + } + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + }) + + ginkgo.Context("Pod update", func() { + ginkgo.It("should update UDN and CDN config", func() { + // Test steps: + // create an EIP no pods + // Create multiple pods, some selected by EIP selectors and some not + // EIP egresses locally and remote + app.Action = func(ctx *cli.Context) error { + // Node 1 is local, Node 2 is remote + egressIP1 := "192.168.126.101" + egressIP2 := "192.168.126.102" + node1IPv4 := "192.168.126.202" + node1IPv4CIDR := node1IPv4 + "/24" + node2IPv4 := "192.168.126.51" + node2IPv4CIDR := node2IPv4 + "/24" + _, node1CDNSubnet, _ := net.ParseCIDR(v4Node1Subnet) + _, node1UDNSubnet, _ := net.ParseCIDR(v4Node1Net1) + nadName := util.GetNADName(eipNamespace2, nadName1) + egressCDNNamespace := newNamespaceWithLabels(eipNamespace, egressPodLabel) + egressUDNNamespace := newNamespaceWithLabels(eipNamespace2, egressPodLabel) + egressPodCDNLocal := *newPodWithLabels(eipNamespace, podName, node1Name, podV4IP, nil) + egressPodUDNLocal := *newPodWithLabels(eipNamespace2, podName2, node1Name, v4Pod1IPNode1Net1, nil) + egressPodCDNRemote := *newPodWithLabels(eipNamespace, podName3, node2Name, podV4IP2, egressPodLabel) + setPrimaryNetworkAnnot(&egressPodCDNRemote, ovntypes.DefaultNetworkName, fmt.Sprintf("%s%s", podV4IP2, util.GetIPFullMaskString(podV4IP2))) + egressPodUDNRemote := *newPodWithLabels(eipNamespace2, podName4, node2Name, v4Pod2IPNode2Net1, egressPodLabel) + setPrimaryNetworkAnnot(&egressPodUDNRemote, nadName, fmt.Sprintf("%s%s", v4Pod2IPNode2Net1, util.GetIPFullMaskString(v4Pod2IPNode2Net1))) + netconf := ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{ + Name: networkName1, + Type: "ovn-k8s-cni-overlay", + }, + Role: ovntypes.NetworkRolePrimary, + Topology: ovntypes.Layer3Topology, + NADName: nadName, + Subnets: v4Net1, + } + nad, err := newNetworkAttachmentDefinition( + eipNamespace2, + nadName1, + netconf, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + netInfo, err := util.NewNetInfo(&netconf) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + node1Annotations := map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": 
fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", node1IPv4CIDR, ""), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node1Subnet, networkName1, v4Node1Net1), + "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), + "k8s.ovn.org/zone-name": node1Name, + "k8s.ovn.org/remote-zone-migrated": node1Name, + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + } + labels := map[string]string{ + "k8s.ovn.org/egress-assignable": "", + } + node1 := getNodeObj(node1Name, node1Annotations, labels) + node2Annotations := map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", node2IPv4CIDR, ""), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node2Subnet, networkName1, v4Node2Net1), + "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), + "k8s.ovn.org/zone-name": node2Name, + "k8s.ovn.org/remote-zone-migrated": node2Name, + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + } + node2 := getNodeObj(node2Name, node2Annotations, labels) + twoNodeStatus := []egressipv1.EgressIPStatusItem{ + { + Node: node1Name, + EgressIP: egressIP1, + }, + { + Node: node2Name, + EgressIP: egressIP2, + }, + } + eIP := egressipv1.EgressIP{ + ObjectMeta: newEgressIPMetaWithMark(egressIPName, eIP1Mark), + Spec: egressipv1.EgressIPSpec{ + EgressIPs: []string{egressIP1, egressIP2}, + PodSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + NamespaceSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + }, + Status: egressipv1.EgressIPStatus{ + Items: twoNodeStatus, + }, + } + ginkgo.By("create EgressIP that doesnt select pods in a CDN and UDN") + initialDB := []libovsdbtest.TestData{ + //CDN start + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID"}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1Name + "-UUID", + Name: "k8s-" + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1CDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + Ports: []string{"k8s-" + node1Name + "-UUID"}, + }, + // UDN start + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: netInfo.GetNetworkScopedClusterRouterName(), + UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + }, + &nbdb.LogicalRouter{ + UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedGWRouterName(node1.Name), + Ports: 
[]string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + networkName1_ + node1Name + "-UUID", + Name: "k8s-" + networkName1_ + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedSwitchName(node1.Name), + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + }, + } + fakeOvn.startWithDBSetup( + libovsdbtest.TestSetup{ + NBData: initialDB, + }, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, + }, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressCDNNamespace, *egressUDNNamespace}, + }, + &corev1.PodList{ + Items: []corev1.Pod{egressPodCDNLocal, egressPodUDNLocal, egressPodCDNRemote, egressPodUDNRemote}, + }, + &nadv1.NetworkAttachmentDefinitionList{ + Items: []nadv1.NetworkAttachmentDefinition{*nad}, + }, + &egressipv1.EgressIPList{ + Items: []egressipv1.EgressIP{eIP}, + }, + ) + asf := addressset.NewOvnAddressSetFactory(fakeOvn.nbClient, true, false) + // watch EgressIP depends on UDN enabled svcs address set being available + c := udnenabledsvc.NewController(fakeOvn.nbClient, asf, fakeOvn.controller.watchFactory.ServiceCoreInformer(), []string{}) + go func() { + gomega.Expect(c.Run(ctx.Done())).Should(gomega.Succeed()) + }() + // Add pod IPs to CDN cache + iCDN, nCDN, _ := net.ParseCIDR(podV4IP + "/23") + nCDN.IP = iCDN + fakeOvn.controller.logicalPortCache.add(&egressPodCDNLocal, "", ovntypes.DefaultNetworkName, "", nil, []*net.IPNet{nCDN}) + err = fakeOvn.nadController.Start() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + fakeOvn.controller.zone = node1Name + fakeOvn.eIPController.zone = node1Name + err = fakeOvn.controller.WatchEgressIPNamespaces() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIPPods() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressNodes() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIP() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + secConInfo, ok := fakeOvn.secondaryControllers[networkName1] + gomega.Expect(ok).To(gomega.BeTrue()) + // Add pod IPs to UDN cache + iUDN, nUDN, _ := net.ParseCIDR(v4Pod1IPNode1Net1 + "/23") + nUDN.IP = iUDN + secConInfo.bnc.logicalPortCache.add(&egressPodUDNLocal, "", util.GetNADName(nad.Namespace, nad.Name), "", nil, []*net.IPNet{nUDN}) + ginkgo.By("update pods with labels so they're now selected by EgressIP") + egressPodCDNLocal.Labels = egressPodLabel + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(eipNamespace).Update(context.Background(), &egressPodCDNLocal, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + egressPodUDNLocal.Labels = egressPodLabel + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(eipNamespace2).Update(context.Background(), &egressPodUDNLocal, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASCDNv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP},
ovntypes.DefaultNetworkName, DefaultNetworkControllerName) + egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4, node2IPv4}) + egressIPServedPodsASUDNv4, _ := buildEgressIPServedPodsAddressSetsForController([]string{v4Pod1IPNode1Net1}, netInfo.GetNetworkName(), DefaultNetworkControllerName) + gomega.Eventually(c.IsAddressSetAvailable).Should(gomega.BeTrue()) + dbIDs := udnenabledsvc.GetAddressSetDBIDs() + udnEnabledSvcV4, _ := addressset.GetTestDbAddrSets(dbIDs, []string{}) + node1LRP := "k8s-node1" + expectedDatabaseStateTwoEgressNodes := []libovsdbtest.TestData{ + // CDN + getReRouteStaticRoute(v4ClusterSubnet, nodeLogicalRouterIPv4[0]), + getReRoutePolicy(podV4IP, "4", "reroute-UUID", []string{nodeLogicalRouterIPv4[0], v4Node2Tsp}, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPodCDNLocal.Namespace, egressPodCDNLocal.Name, IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs()), + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, v4ClusterSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID"}, + Nat: []string{"egressip-nat-UUID", "egressip-nat2-UUID"}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + Policies: []string{"default-no-reroute-UUID", "no-reroute-service-UUID", + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic", "reroute-UUID"}, + StaticRoutes: []string{"reroute-static-route-UUID"}, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{"100.64.0.2/29"}, + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASCDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1Name + "-UUID", + Name: "k8s-" + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1CDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + Ports: []string{"k8s-" + node1Name + "-UUID"}, + QOSRules: 
[]string{"default-QoS-UUID"}, + }, + &nbdb.NAT{ + UUID: "egressip-nat-UUID", + LogicalIP: podV4IP2, + ExternalIP: egressIP1, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPodCDNRemote.Namespace, egressPodCDNRemote.Name, IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &node1LRP, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.NAT{ + UUID: "egressip-nat2-UUID", + LogicalIP: podV4IP, + ExternalIP: egressIP1, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPodCDNLocal.Namespace, egressPodCDNLocal.Name, IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &node1LRP, + Options: map[string]string{ + "stateless": "false", + }, + }, + getNoReRouteReplyTrafficPolicy(ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + getDefaultQoSRule(false, ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + egressSVCServedPodsASv4, + egressIPServedPodsASCDNv4, + egressNodeIPsASv4, + + // UDN + getReRouteStaticRouteForController(v4Net1, nodeLogicalRouterIPv4[0], netInfo.GetNetworkName()), + getReRoutePolicyForController(egressIPName, eipNamespace2, podName2, v4Pod1IPNode1Net1, eIP1Mark, IPFamilyValueV4, []string{nodeLogicalRouterIPv4[0], v4Node2Tsp}, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getGWPktMarkLRPForController(eIP1Mark, egressIPName, eipNamespace2, podName2, v4Pod1IPNode1Net1, IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getGWPktMarkLRPForController(eIP1Mark, egressIPName, eipNamespace2, podName4, v4Pod2IPNode2Net1, IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getNoReRoutePolicyForUDNEnabledSvc(false, netInfo.GetNetworkName(), DefaultNetworkControllerName, egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, udnEnabledSvcV4.Name), + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, v4Net1), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + 
&nbdb.LogicalRouter{ + Name: netInfo.GetNetworkScopedClusterRouterName(), + UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + Policies: []string{"udn-default-no-reroute-node-UUID", "udn-default-no-reroute-UUID", "udn-no-reroute-service-UUID", "udn-enabled-svc-no-reroute-UUID", + fmt.Sprintf("%s-no-reroute-reply-traffic", netInfo.GetNetworkName()), + getReRoutePolicyUUID(eipNamespace2, podName2, IPFamilyValueV4, netInfo.GetNetworkName())}, + StaticRoutes: []string{fmt.Sprintf("%s-reroute-static-route-UUID", netInfo.GetNetworkName())}, + }, + &nbdb.LogicalRouter{ + UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedGWRouterName(node1.Name), + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + Policies: []string{getGWPktMarkLRPUUID(eipNamespace2, podName2, IPFamilyValueV4, netInfo.GetNetworkName()), + getGWPktMarkLRPUUID(eipNamespace2, podName4, IPFamilyValueV4, netInfo.GetNetworkName())}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + networkName1_ + node1Name + "-UUID", + Name: "k8s-" + networkName1_ + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedSwitchName(node1.Name), + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, + QOSRules: []string{fmt.Sprintf("%s-QoS-UUID", netInfo.GetNetworkName())}, + }, + getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), + getDefaultQoSRule(false, netInfo.GetNetworkName(), DefaultNetworkControllerName), + egressIPServedPodsASUDNv4, + udnEnabledSvcV4, + } + ginkgo.By("ensure expected equals actual") + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseStateTwoEgressNodes)) + return nil + } + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + }) +}) + +// returns the address set with externalID "k8s.ovn.org/name": "egressip-served-pods" +func buildEgressIPServedPodsAddressSetsForController(ips []string, network, controller string) (*nbdb.AddressSet, *nbdb.AddressSet) { + dbIDs := getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, network, controller) + return addressset.GetTestDbAddrSets(dbIDs, ips) +} + +// returns the address set with externalID "k8s.ovn.org/name": "node-ips" +func buildEgressIPNodeAddressSetsForController(ips []string) (*nbdb.AddressSet, *nbdb.AddressSet) { + dbIDs := getEgressIPAddrSetDbIDs(NodeIPAddrSetName, ovntypes.DefaultNetworkName, DefaultNetworkControllerName) + return addressset.GetTestDbAddrSets(dbIDs, ips) +} + +// returns the LRP that allows (does not reroute) reply traffic, matched on pkt.mark +func getNoReRouteReplyTrafficPolicyForController(network, controller string) *nbdb.LogicalRouterPolicy { + return &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("pkt.mark == %d", ovntypes.EgressIPReplyTrafficConnectionMark), + Action: 
nbdb.LogicalRouterPolicyActionAllow, + ExternalIDs: getEgressIPLRPNoReRouteDbIDs(ovntypes.DefaultNoRereoutePriority, ReplyTrafficNoReroute, IPFamilyValue, network, controller).GetExternalIDs(), + UUID: fmt.Sprintf("%s-no-reroute-reply-traffic", network), + } +} + +func getReRouteStaticRouteForController(clusterSubnet, nextHop, network string) *nbdb.LogicalRouterStaticRoute { + return &nbdb.LogicalRouterStaticRoute{ + Nexthop: nextHop, + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + IPPrefix: clusterSubnet, + UUID: fmt.Sprintf("%s-reroute-static-route-UUID", network), + } +} + +func getReRoutePolicyForController(eIPName, podNamespace, podName, podIP string, mark int, ipFamily egressIPFamilyValue, nextHops []string, network, controller string) *nbdb.LogicalRouterPolicy { + return &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.EgressIPReroutePriority, + Match: fmt.Sprintf("%s.src == %s", ipFamily, podIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: nextHops, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIPName, podNamespace, podName, ipFamily, network, controller).GetExternalIDs(), + Options: getMarkOptions(mark), + UUID: getReRoutePolicyUUID(podNamespace, podName, ipFamily, network), + } +} + +func getNoReRoutePolicyForUDNEnabledSvc(v6 bool, network, controllerName, eipSrcASHash, eSvcSrcASHash, udnEnabledSvcASHash string) *nbdb.LogicalRouterPolicy { + family := IPFamilyValueV4 + if v6 { + family = IPFamilyValueV6 + } + return &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", eipSrcASHash, eSvcSrcASHash, udnEnabledSvcASHash), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-enabled-svc-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRouteDbIDs(ovntypes.DefaultNoRereoutePriority, NoReRouteUDNPodToCDNSvc, family, network, controllerName).GetExternalIDs(), + } +} + +func getReRoutePolicyUUID(podNamespace, podName string, ipFamily egressIPFamilyValue, network string) string { + return fmt.Sprintf("%s-reroute-%s-%s-%s", network, podNamespace, podName, ipFamily) +} + +func getGWPktMarkLRPForController(mark int, eIPName, podNamespace, podName, podIP string, ipFamily egressIPFamilyValue, network, controller string) *nbdb.LogicalRouterPolicy { + dbIDs := getEgressIPLRPSNATMarkDbIDs(eIPName, podNamespace, podName, ipFamily, network, controller) + return &nbdb.LogicalRouterPolicy{ + UUID: getGWPktMarkLRPUUID(podNamespace, podName, ipFamily, network), + Priority: ovntypes.EgressIPSNATMarkPriority, + Action: nbdb.LogicalRouterPolicyActionAllow, + ExternalIDs: dbIDs.GetExternalIDs(), + Options: getMarkOptions(mark), + Match: fmt.Sprintf("%s.src == %s && pkt.mark == 0", ipFamily, podIP), + } +} + +func getGWPktMarkLRPUUID(podNamespace, podName string, ipFamily egressIPFamilyValue, network string) string { + return fmt.Sprintf("%s-gw-pkt-mark-%s-%s-%s-UUID", network, podNamespace, podName, ipFamily) +} + +func getMarkOptions(mark int) map[string]string { + return map[string]string{"pkt_mark": fmt.Sprintf("%d", mark)} +} + +// jsonPatchOperation contains all the info needed to perform a JSON patch operation on a k8s object +type jsonPatchOperation struct { + Operation string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value,omitempty"` +} + +type patchFn func(name string, patchData []byte) error + +func patchEgressIP(patchFn patchFn, name string, patches ...jsonPatchOperation) error { + klog.Infof("Patching status on EgressIP %s: %v", name, patches) + 
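// Reviewer note (not part of the patch): a minimal sketch of the payload this
// produces. For a hypothetical mark of 50001 and a single status item, the
// marshaled JSON Patch document would look roughly like:
//
//	[
//	  {"op": "add", "path": "/metadata/annotations",
//	   "value": {"k8s.ovn.org/egressip-mark": "50001"}},
//	  {"op": "replace", "path": "/status",
//	   "value": {"items": [{"node": "node1", "egressIP": "192.168.126.101"}]}}
//	]
//
// The annotation key shown is an assumption based on util.EgressIPMarkAnnotation;
// the marshaled patch is then applied with retry.RetryOnConflict below.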
op, err := json.Marshal(patches) + if err != nil { + return fmt.Errorf("error serializing patch operation: %+v, err: %v", patches, err) + } + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + return patchFn(name, op) + }) +} + +func generateEgressIPPatches(mark int, statusItems []egressipv1.EgressIPStatusItem) []jsonPatchOperation { + patches := make([]jsonPatchOperation, 0, 1) + patches = append(patches, generateMarkPatchOp(mark)) + return append(patches, generateStatusPatchOp(statusItems)) +} + +func generateMarkPatchOp(mark int) jsonPatchOperation { + return jsonPatchOperation{ + Operation: "add", + Path: "/metadata/annotations", + Value: createAnnotWithMark(mark), + } +} + +func createAnnotWithMark(mark int) map[string]string { + return map[string]string{util.EgressIPMarkAnnotation: fmt.Sprintf("%d", mark)} +} + +func generateStatusPatchOp(statusItems []egressipv1.EgressIPStatusItem) jsonPatchOperation { + return jsonPatchOperation{ + Operation: "replace", + Path: "/status", + Value: egressipv1.EgressIPStatus{ + Items: statusItems, + }, + } +} + +func newEgressIPMetaWithMark(name string, mark int) metav1.ObjectMeta { + return metav1.ObjectMeta{ + UID: k8stypes.UID(name), + Name: name, + Labels: map[string]string{ + "name": name, + }, + Annotations: map[string]string{util.EgressIPMarkAnnotation: fmt.Sprintf("%d", mark)}, + } +} diff --git a/go-controller/pkg/ovn/egressqos.go b/go-controller/pkg/ovn/egressqos.go index 8223bdd4c2..5a993ecbbf 100644 --- a/go-controller/pkg/ovn/egressqos.go +++ b/go-controller/pkg/ovn/egressqos.go @@ -17,11 +17,13 @@ import ( "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + metaapplyv1 "k8s.io/client-go/applyconfigurations/meta/v1" v1coreinformers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" utilnet "k8s.io/utils/net" + "k8s.io/utils/ptr" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" egressqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" @@ -190,9 +192,9 @@ func (oc *DefaultNetworkController) initEgressQoSController( klog.Info("Setting up event handlers for EgressQoS") oc.egressQoSLister = eqInformer.Lister() oc.egressQoSSynced = eqInformer.Informer().HasSynced - oc.egressQoSQueue = workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(1*time.Second, 5*time.Second, 5), - "egressqos", + oc.egressQoSQueue = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "egressqos"}, ) _, err := eqInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ AddFunc: oc.onEgressQoSAdd, @@ -206,9 +208,9 @@ func (oc *DefaultNetworkController) initEgressQoSController( oc.egressQoSPodLister = podInformer.Lister() oc.egressQoSPodSynced = podInformer.Informer().HasSynced - oc.egressQoSPodQueue = workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(1*time.Second, 5*time.Second, 5), - "egressqospods", + oc.egressQoSPodQueue = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "egressqospods"}, ) _, err = podInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ 
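// Reviewer note (not part of the patch): the typed workqueue constructors used
// in these hunks are generic over the key type, which removes the key.(string)
// assertions later in the file. A standalone sketch of the same pattern:
//
//	q := workqueue.NewTypedRateLimitingQueueWithConfig(
//		workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5),
//		workqueue.TypedRateLimitingQueueConfig[string]{Name: "example"},
//	)
//	q.Add("default/example-object") // keys are typed as string
//	key, shutdown := q.Get()        // key is already a string; no assertion needed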
AddFunc: oc.onEgressQoSPodAdd, @@ -221,9 +223,9 @@ func (oc *DefaultNetworkController) initEgressQoSController( oc.egressQoSNodeLister = nodeInformer.Lister() oc.egressQoSNodeSynced = nodeInformer.Informer().HasSynced - oc.egressQoSNodeQueue = workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemFastSlowRateLimiter(1*time.Second, 5*time.Second, 5), - "egressqosnodes", + oc.egressQoSNodeQueue = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "egressqosnodes"}, ) _, err = nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: oc.onEgressQoSNodeAdd, // we only care about new logical switches being added @@ -357,19 +359,18 @@ func (oc *DefaultNetworkController) processNextEgressQoSWorkItem(wg *sync.WaitGr defer oc.egressQoSQueue.Done(key) - eqKey := key.(string) - eq, err := oc.getEgressQoS(eqKey) + eq, err := oc.getEgressQoS(key) if err != nil { - utilruntime.HandleError(fmt.Errorf("failed to retrieve %s qos object: %v", eqKey, err)) + utilruntime.HandleError(fmt.Errorf("failed to retrieve %s qos object: %v", key, err)) oc.egressQoSQueue.Forget(key) return true } - err = oc.syncEgressQoS(eqKey, eq) + err = oc.syncEgressQoS(key, eq) if err == nil { oc.egressQoSQueue.Forget(key) if err = oc.updateEgressQoSZoneStatusToReady(eq); err != nil { - utilruntime.HandleError(fmt.Errorf("failed to update EgressQoS object %s with status: %v", eqKey, err)) + utilruntime.HandleError(fmt.Errorf("failed to update EgressQoS object %s with status: %v", key, err)) } return true } @@ -382,7 +383,7 @@ func (oc *DefaultNetworkController) processNextEgressQoSWorkItem(wg *sync.WaitGr } if err = oc.updateEgressQoSZoneStatusToNotReady(eq, err); err != nil { - utilruntime.HandleError(fmt.Errorf("failed to update EgressQoS object %s with status: %v", eqKey, err)) + utilruntime.HandleError(fmt.Errorf("failed to update EgressQoS object %s with status: %v", key, err)) } oc.egressQoSQueue.Forget(key) @@ -905,7 +906,7 @@ func (oc *DefaultNetworkController) processNextEgressQoSPodWorkItem(wg *sync.Wai } defer oc.egressQoSPodQueue.Done(key) - err := oc.syncEgressQoSPod(key.(string)) + err := oc.syncEgressQoSPod(key) if err == nil { oc.egressQoSPodQueue.Forget(key) return true @@ -979,7 +980,7 @@ func (oc *DefaultNetworkController) processNextEgressQoSNodeWorkItem(wg *sync.Wa } defer oc.egressQoSNodeQueue.Done(key) - err := oc.syncEgressQoSNode(key.(string)) + err := oc.syncEgressQoSNode(key) if err == nil { oc.egressQoSNodeQueue.Forget(key) return true @@ -1088,20 +1089,21 @@ func (oc *DefaultNetworkController) updateEgressQoSZoneStatusCondition(newCondit if err != nil { return err } + + newConditionApply := &metaapplyv1.ConditionApplyConfiguration{ + Type: &newCondition.Type, + Status: &newCondition.Status, + Reason: &newCondition.Reason, + Message: &newCondition.Message, + } + existingCondition := meta.FindStatusCondition(eq.Status.Conditions, newCondition.Type) - if existingCondition == nil { - newCondition.LastTransitionTime = metav1.NewTime(time.Now()) - } else { - if existingCondition.Status != newCondition.Status { - existingCondition.Status = newCondition.Status - existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) - } - existingCondition.Reason = newCondition.Reason - existingCondition.Message = newCondition.Message - newCondition = *existingCondition + if existingCondition == nil || existingCondition.Status != newCondition.Status { + 
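// Reviewer note (not part of the patch): with server-side apply, a field left
// nil in the ConditionApplyConfiguration is simply not asserted by this field
// manager, so LastTransitionTime is only populated in the branch below when the
// condition is new or its status flips; otherwise the previously applied
// timestamp is preserved.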
newConditionApply.LastTransitionTime = ptr.To(metav1.NewTime(time.Now())) } + applyObj := egressqosapply.EgressQoS(name, namespace). - WithStatus(egressqosapply.EgressQoSStatus().WithConditions(newCondition)) + WithStatus(egressqosapply.EgressQoSStatus().WithConditions(newConditionApply)) _, err = oc.kube.EgressQoSClient.K8sV1().EgressQoSes(namespace).ApplyStatus(context.TODO(), applyObj, metav1.ApplyOptions{FieldManager: oc.zone, Force: true}) return err diff --git a/go-controller/pkg/ovn/egressqos_test.go b/go-controller/pkg/ovn/egressqos_test.go index 06c0ac3eb3..17fe8dca47 100644 --- a/go-controller/pkg/ovn/egressqos_test.go +++ b/go-controller/pkg/ovn/egressqos_test.go @@ -7,8 +7,7 @@ import ( "strings" "time" - "github.com/onsi/ginkgo" - ginkgotable "github.com/onsi/ginkgo/extensions/table" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" @@ -73,7 +72,7 @@ var _ = ginkgo.Describe("OVN EgressQoS Operations", func() { fakeOVN.shutdown() }) - ginkgotable.DescribeTable("reconciles existing and non-existing egressqoses without PodSelectors", + ginkgo.DescribeTable("reconciles existing and non-existing egressqoses without PodSelectors", func(ipv4Mode, ipv6Mode bool, dst1, dst2, match1, match2 string) { app.Action = func(ctx *cli.Context) error { config.IPv4Mode = ipv4Mode @@ -233,18 +232,18 @@ var _ = ginkgo.Describe("OVN EgressQoS Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }, - ginkgotable.Entry("ipv4", true, false, "1.2.3.4/32", "5.6.7.8/32", + ginkgo.Entry("ipv4", true, false, "1.2.3.4/32", "5.6.7.8/32", fmt.Sprintf("(ip4.dst == 1.2.3.4/32) && ip4.src == $%s", asv4), fmt.Sprintf("(ip4.dst == 5.6.7.8/32) && ip4.src == $%s", asv4)), - ginkgotable.Entry("ipv6", false, true, "2001:0db8:85a3:0000:0000:8a2e:0370:7334/128", "2001:0db8:85a3:0000:0000:8a2e:0370:7335/128", + ginkgo.Entry("ipv6", false, true, "2001:0db8:85a3:0000:0000:8a2e:0370:7334/128", "2001:0db8:85a3:0000:0000:8a2e:0370:7335/128", fmt.Sprintf("(ip6.dst == 2001:0db8:85a3:0000:0000:8a2e:0370:7334/128) && ip6.src == $%s", asv6), fmt.Sprintf("(ip6.dst == 2001:0db8:85a3:0000:0000:8a2e:0370:7335/128) && ip6.src == $%s", asv6)), - ginkgotable.Entry("dual", true, true, "1.2.3.4/32", "2001:0db8:85a3:0000:0000:8a2e:0370:7335/128", + ginkgo.Entry("dual", true, true, "1.2.3.4/32", "2001:0db8:85a3:0000:0000:8a2e:0370:7335/128", fmt.Sprintf("(ip4.dst == 1.2.3.4/32) && (ip4.src == $%s || ip6.src == $%s)", asv4, asv6), fmt.Sprintf("(ip6.dst == 2001:0db8:85a3:0000:0000:8a2e:0370:7335/128) && (ip4.src == $%s || ip6.src == $%s)", asv4, asv6)), ) - ginkgotable.DescribeTable("reconciles existing and non-existing egressqoses with PodSelectors", + ginkgo.DescribeTable("reconciles existing and non-existing egressqoses with PodSelectors", func(ipv4Mode, ipv6Mode bool, podIP, dst1, dst2, match1, match2 string) { app.Action = func(ctx *cli.Context) error { config.IPv4Mode = ipv4Mode @@ -432,13 +431,13 @@ var _ = ginkgo.Describe("OVN EgressQoS Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }, - ginkgotable.Entry("ipv4", true, false, "10.128.1.3", "1.2.3.4/32", "5.6.7.8/32", + ginkgo.Entry("ipv4", true, false, "10.128.1.3", "1.2.3.4/32", "5.6.7.8/32", fmt.Sprintf("(ip4.dst == 1.2.3.4/32) && ip4.src == $%s", qosASv4), fmt.Sprintf("(ip4.dst == 5.6.7.8/32) && ip4.src == $%s", asv4)), - ginkgotable.Entry("ipv6", false, true, "fd00:10:244:2::3", 
"2001:0db8:85a3:0000:0000:8a2e:0370:7334/128", "2001:0db8:85a3:0000:0000:8a2e:0370:7335/128", + ginkgo.Entry("ipv6", false, true, "fd00:10:244:2::3", "2001:0db8:85a3:0000:0000:8a2e:0370:7334/128", "2001:0db8:85a3:0000:0000:8a2e:0370:7335/128", fmt.Sprintf("(ip6.dst == 2001:0db8:85a3:0000:0000:8a2e:0370:7334/128) && ip6.src == $%s", qosASv6), fmt.Sprintf("(ip6.dst == 2001:0db8:85a3:0000:0000:8a2e:0370:7335/128) && ip6.src == $%s", asv6)), - ginkgotable.Entry("dual", true, true, "10.128.1.3", "1.2.3.4/32", "2001:0db8:85a3:0000:0000:8a2e:0370:7335/128", + ginkgo.Entry("dual", true, true, "10.128.1.3", "1.2.3.4/32", "2001:0db8:85a3:0000:0000:8a2e:0370:7335/128", fmt.Sprintf("(ip4.dst == 1.2.3.4/32) && (ip4.src == $%s || ip6.src == $%s)", qosASv4, qosASv6), fmt.Sprintf("(ip6.dst == 2001:0db8:85a3:0000:0000:8a2e:0370:7335/128) && (ip4.src == $%s || ip6.src == $%s)", asv4, asv6)), ) @@ -939,7 +938,7 @@ var _ = ginkgo.Describe("OVN EgressQoS Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) - ginkgotable.DescribeTable("Ensure QoS AddressSet is updated properly for pod events", + ginkgo.DescribeTable("Ensure QoS AddressSet is updated properly for pod events", func(podZone string) { namespaceT := *newNamespace("namespace1") @@ -1047,8 +1046,8 @@ var _ = ginkgo.Describe("OVN EgressQoS Operations", func() { fakeOVN.asf.EventuallyExpectAddressSetWithAddresses(qosAS, nil) } }, - ginkgotable.Entry("create and update pod in local zone", "local"), - ginkgotable.Entry("create and update pod in remote zone", "remote"), + ginkgo.Entry("create and update pod in local zone", "local"), + ginkgo.Entry("create and update pod in remote zone", "remote"), ) }) diff --git a/go-controller/pkg/ovn/egressservices_test.go b/go-controller/pkg/ovn/egressservices_test.go index 0757cb441d..f11bf28308 100644 --- a/go-controller/pkg/ovn/egressservices_test.go +++ b/go-controller/pkg/ovn/egressservices_test.go @@ -5,8 +5,8 @@ import ( "fmt" "net" - "github.com/onsi/ginkgo" ginkgotable "github.com/onsi/ginkgo/extensions/table" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" @@ -73,7 +73,7 @@ var _ = ginkgo.Describe("OVN Egress Service Operations", func() { fakeOVN.shutdown() }) - ginkgo.Context("on startup repair", func() { + ginkgo.XContext("on startup repair", func() { ginkgo.It("should delete stale logical router policies and EgressService address set IPs", func() { app.Action = func(ctx *cli.Context) error { namespaceT := *newNamespace("testns") @@ -1501,7 +1501,7 @@ var _ = ginkgo.Describe("OVN Egress Service Operations", func() { fakeOVN.asf.ExpectAddressSetWithAddresses(egresssvc.GetEgressServiceAddrSetDbIDs(controllerName), expectedEgressSvcAddrSet) ginkgo.By("updating the second node host cidr the node ip no re-route address set will be updated") - nodeIPsASdbIDs := getEgressIPAddrSetDbIDs(NodeIPAddrSetName, DefaultNetworkControllerName) + nodeIPsASdbIDs := getEgressIPAddrSetDbIDs(NodeIPAddrSetName, ovntypes.DefaultNetworkName, DefaultNetworkControllerName) fakeOVN.asf.EventuallyExpectAddressSetWithAddresses(nodeIPsASdbIDs, []string{node1IPv4, node2IPv4, node1IPv6, node2IPv6}) node2.ObjectMeta.Annotations[util.OVNNodeHostCIDRs] = fmt.Sprintf("[\"%s\", \"%s\", \"%s\", \"%s\"]", node2IPv4+"/24", node2IPv6+"/64", vipIPv4+"/24", vipIPv6+"/64") @@ -1606,52 +1606,66 @@ func egressServiceRouterPolicy(uuid, key, addr, nexthop string) *nbdb.LogicalRou func getDefaultNoReroutePolicies(controllerName string) []*nbdb.LogicalRouterPolicy { 
allLRPS := []*nbdb.LogicalRouterPolicy{} egressSvcPodsV4, egressSvcPodsV6 := addressset.GetHashNamesForAS(egresssvc.GetEgressServiceAddrSetDbIDs(controllerName)) - egressipPodsV4, egressipPodsV6 := addressset.GetHashNamesForAS(getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, controllerName)) - nodeIPsV4, nodeIPsV6 := addressset.GetHashNamesForAS(getEgressIPAddrSetDbIDs(NodeIPAddrSetName, controllerName)) + egressipPodsV4, egressipPodsV6 := addressset.GetHashNamesForAS(getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, ovntypes.DefaultNetworkName, controllerName)) + nodeIPsV4, nodeIPsV6 := addressset.GetHashNamesForAS(getEgressIPAddrSetDbIDs(NodeIPAddrSetName, ovntypes.DefaultNetworkName, controllerName)) + v4ExtIDs := getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, controllerName).GetExternalIDs() + v6ExtIDs := getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV6, ovntypes.DefaultNetworkName, controllerName).GetExternalIDs() + allLRPS = append(allLRPS, &nbdb.LogicalRouterPolicy{ Priority: ovntypes.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", egressipPodsV4, egressSvcPodsV4, nodeIPsV4), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + ExternalIDs: v4ExtIDs, + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, }, &nbdb.LogicalRouterPolicy{ Priority: ovntypes.DefaultNoRereoutePriority, Match: fmt.Sprintf("(ip6.src == $%s || ip6.src == $%s) && ip6.dst == $%s", egressipPodsV6, egressSvcPodsV6, nodeIPsV6), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-v6-no-reroute-node-UUID", - Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-v6-no-reroute-node-UUID", + ExternalIDs: v6ExtIDs, + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, }, - getNoReRouteReplyTrafficPolicy(), + getNoReRouteReplyTrafficPolicy(ovntypes.DefaultNetworkName, controllerName), ) + v4Pod2PodExtIDs := getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, controllerName).GetExternalIDs() + v6Pod2PodExtIDs := getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV6, ovntypes.DefaultNetworkName, controllerName).GetExternalIDs() + v4Pod2JoinExtIDs := getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, controllerName).GetExternalIDs() + v6Pod2JoinExtIDs := getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV6, ovntypes.DefaultNetworkName, controllerName).GetExternalIDs() + allLRPS = append(allLRPS, &nbdb.LogicalRouterPolicy{ - Priority: ovntypes.DefaultNoRereoutePriority, - Match: "ip4.src == 10.128.0.0/16 && ip4.dst == 10.128.0.0/16", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-pod2pod-no-reroute-UUID", + Priority: ovntypes.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/16 && ip4.dst == 10.128.0.0/16", + Action: nbdb.LogicalRouterPolicyActionAllow, + ExternalIDs: v4Pod2PodExtIDs, + UUID: "default-pod2pod-no-reroute-UUID", }, &nbdb.LogicalRouterPolicy{ - Priority: ovntypes.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip4.src == 10.128.0.0/16 && ip4.dst == %s", config.Gateway.V4JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-service-UUID", + Priority: 
ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/16 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + ExternalIDs: v4Pod2JoinExtIDs, + UUID: "no-reroute-service-UUID", }, &nbdb.LogicalRouterPolicy{ - Priority: ovntypes.DefaultNoRereoutePriority, - Match: "ip6.src == fe00::/16 && ip6.dst == fe00::/16", - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "default-v6-pod2pod-no-reroute-UUID", + Priority: ovntypes.DefaultNoRereoutePriority, + Match: "ip6.src == fe00::/16 && ip6.dst == fe00::/16", + Action: nbdb.LogicalRouterPolicyActionAllow, + ExternalIDs: v6Pod2PodExtIDs, + UUID: "default-v6-pod2pod-no-reroute-UUID", }, &nbdb.LogicalRouterPolicy{ - Priority: ovntypes.DefaultNoRereoutePriority, - Match: fmt.Sprintf("ip6.src == fe00::/16 && ip6.dst == %s", config.Gateway.V6JoinSubnet), - Action: nbdb.LogicalRouterPolicyActionAllow, - UUID: "no-reroute-v6-service-UUID", + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip6.src == fe00::/16 && ip6.dst == %s", config.Gateway.V6JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + ExternalIDs: v6Pod2JoinExtIDs, + UUID: "no-reroute-v6-service-UUID", }, ) diff --git a/go-controller/pkg/ovn/external_gateway_apb_test.go b/go-controller/pkg/ovn/external_gateway_apb_test.go index f8a3293208..0576624abe 100644 --- a/go-controller/pkg/ovn/external_gateway_apb_test.go +++ b/go-controller/pkg/ovn/external_gateway_apb_test.go @@ -21,8 +21,8 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" - "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/extensions/table" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" "github.com/urfave/cli/v2" v1 "k8s.io/api/core/v1" @@ -104,7 +104,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { ginkgo.Context("on setting namespace gateway static hop", func() { - table.DescribeTable("reconciles an new pod with namespace single exgw static GW already set", func(bfd bool, finalNB []libovsdbtest.TestData) { + ginkgo.DescribeTable("reconciles an new pod with namespace single exgw static GW already set", func(bfd bool, finalNB []libovsdbtest.TestData) { app.Action = func(ctx *cli.Context) error { namespaceT := *newNamespace(namespaceName) @@ -167,7 +167,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }, table.Entry("No BFD", false, []libovsdbtest.TestData{ + }, ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -203,7 +203,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { StaticRoutes: []string{"static-route-1-UUID"}, }, }), - table.Entry("BFD Enabled", true, []libovsdbtest.TestData{ + ginkgo.Entry("BFD Enabled", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -246,7 +246,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, })) - table.DescribeTable("reconciles an new pod with namespace single exgw static GW after policy is created", func(bfd bool, finalNB []libovsdbtest.TestData) { + ginkgo.DescribeTable("reconciles an new pod with namespace single exgw static GW after policy is created", func(bfd bool, finalNB 
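// Reviewer note (not part of the patch): in Ginkgo v2 the table extension was
// folded into the core package, so the "github.com/onsi/ginkgo/extensions/table"
// import goes away and table.DescribeTable/table.Entry become
// ginkgo.DescribeTable/ginkgo.Entry with identical semantics.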
[]libovsdbtest.TestData) { app.Action = func(ctx *cli.Context) error { namespaceT := *newNamespace(namespaceName) @@ -313,7 +313,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }, table.Entry("No BFD", false, []libovsdbtest.TestData{ + }, ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -349,7 +349,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { StaticRoutes: []string{"static-route-1-UUID"}, }, }), - table.Entry("BFD Enabled", true, []libovsdbtest.TestData{ + ginkgo.Entry("BFD Enabled", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -392,7 +392,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, })) - table.DescribeTable("reconciles an new pod with namespace single exgw static gateway already set with pod event first", func(bfd bool, finalNB []libovsdbtest.TestData) { + ginkgo.DescribeTable("reconciles an new pod with namespace single exgw static gateway already set with pod event first", func(bfd bool, finalNB []libovsdbtest.TestData) { app.Action = func(ctx *cli.Context) error { namespaceT := *newNamespace(namespaceName) @@ -452,7 +452,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }, table.Entry("No BFD", false, []libovsdbtest.TestData{ + }, ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -488,7 +488,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { StaticRoutes: []string{"static-route-1-UUID"}, }, }), - table.Entry("BFD Enabled", true, []libovsdbtest.TestData{ + ginkgo.Entry("BFD Enabled", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -531,7 +531,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, })) - table.DescribeTable("reconciles an new pod with namespace double exgw static gateways already set", func(bfd bool, finalNB []libovsdbtest.TestData) { + ginkgo.DescribeTable("reconciles an new pod with namespace double exgw static gateways already set", func(bfd bool, finalNB []libovsdbtest.TestData) { app.Action = func(ctx *cli.Context) error { @@ -595,7 +595,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }, - table.Entry("No BFD", false, []libovsdbtest.TestData{ + ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -641,7 +641,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { StaticRoutes: []string{"static-route-1-UUID", "static-route-2-UUID"}, }, }), - table.Entry("BFD Enabled", true, []libovsdbtest.TestData{ + ginkgo.Entry("BFD Enabled", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -701,7 +701,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }), ) - table.DescribeTable("reconciles deleting a pod with namespace double exgw static gateway 
already set", + ginkgo.DescribeTable("reconciles deleting a pod with namespace double exgw static gateway already set", func(bfd bool, initNB []libovsdbtest.TestData, syncNB []libovsdbtest.TestData, @@ -766,7 +766,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }, - table.Entry("No BFD", false, + ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitch{ UUID: "node1", @@ -836,7 +836,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, }, ), - table.Entry("BFD", true, + ginkgo.Entry("BFD", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitch{ UUID: "node1", @@ -920,7 +920,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { ), ) - table.DescribeTable("reconciles deleting a pod with namespace double exgw static gateway already set IPV6", + ginkgo.DescribeTable("reconciles deleting a pod with namespace double exgw static gateway already set IPV6", func(bfd bool, initNB, syncNB, finalNB []libovsdbtest.TestData) { app.Action = func(ctx *cli.Context) error { @@ -980,7 +980,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }, - table.Entry("BFD IPV6", true, []libovsdbtest.TestData{ + ginkgo.Entry("BFD IPV6", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitch{ UUID: "node1", Name: "node1", @@ -1062,7 +1062,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { ), ) - table.DescribeTable("reconciles deleting a exgw namespace with active pod", + ginkgo.DescribeTable("reconciles deleting a exgw namespace with active pod", func(bfd bool, initNB []libovsdbtest.TestData, finalNB []libovsdbtest.TestData, @@ -1124,7 +1124,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }, - table.Entry("No BFD", false, + ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitch{ UUID: "node1", @@ -1183,7 +1183,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, }, ), - table.Entry("BFD", true, + ginkgo.Entry("BFD", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitch{ UUID: "node1", @@ -1257,7 +1257,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }) ginkgo.Context("on setting pod dynamic gateways", func() { - table.DescribeTable("reconciles a host networked pod acting as a exgw for another namespace for new pod", func(bfd bool, finalNB []libovsdbtest.TestData) { + ginkgo.DescribeTable("reconciles a host networked pod acting as a exgw for another namespace for new pod", func(bfd bool, finalNB []libovsdbtest.TestData) { app.Action = func(ctx *cli.Context) error { namespaceT := *newNamespace(namespaceName) @@ -1328,7 +1328,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }, table.Entry("No BFD", false, []libovsdbtest.TestData{ + }, ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -1364,7 +1364,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { StaticRoutes: []string{"static-route-1-UUID"}, }, }), - table.Entry("BFD Enabled", true, []libovsdbtest.TestData{ + ginkgo.Entry("BFD Enabled", 
true, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -1407,7 +1407,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, })) - table.DescribeTable("reconciles a host networked pod acting as a exgw for another namespace for existing pod", func(bfd bool, finalNB []libovsdbtest.TestData) { + ginkgo.DescribeTable("reconciles a host networked pod acting as a exgw for another namespace for existing pod", func(bfd bool, finalNB []libovsdbtest.TestData) { app.Action = func(ctx *cli.Context) error { namespaceT := *newNamespace(namespaceName) @@ -1470,7 +1470,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }, table.Entry("No BFD", false, []libovsdbtest.TestData{ + }, ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -1506,7 +1506,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { StaticRoutes: []string{"static-route-1-UUID"}, }, }), - table.Entry("BFD Enabled", true, []libovsdbtest.TestData{ + ginkgo.Entry("BFD Enabled", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -1549,7 +1549,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, })) - table.DescribeTable("reconciles a multus networked pod acting as a exgw for another namespace for new pod", func(bfd bool, finalNB []libovsdbtest.TestData) { + ginkgo.DescribeTable("reconciles a multus networked pod acting as a exgw for another namespace for new pod", func(bfd bool, finalNB []libovsdbtest.TestData) { app.Action = func(ctx *cli.Context) error { ns := nettypes.NetworkStatus{Name: "dummy", IPs: []string{"11.0.0.1"}} networkStatuses := []nettypes.NetworkStatus{ns} @@ -1632,7 +1632,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }, table.Entry("No BFD", false, []libovsdbtest.TestData{ + }, ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -1668,7 +1668,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { StaticRoutes: []string{"static-route-1-UUID"}, }, }), - table.Entry("BFD Enabled", true, []libovsdbtest.TestData{ + ginkgo.Entry("BFD Enabled", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -1711,7 +1711,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, })) - table.DescribeTable("reconciles deleting a host networked pod acting as a exgw for another namespace for existing pod", + ginkgo.DescribeTable("reconciles deleting a host networked pod acting as a exgw for another namespace for existing pod", func(bfd bool, beforeDeleteNB []libovsdbtest.TestData, afterDeleteNB []libovsdbtest.TestData) { @@ -1785,7 +1785,7 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }, - table.Entry("No BFD", false, + ginkgo.Entry("No BFD", false, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", @@ -1849,7 +1849,7 @@ var _ = ginkgo.Describe("OVN for APB External 
Route Operations", func() { }, }, ), - table.Entry("BFD Enabled", true, []libovsdbtest.TestData{ + ginkgo.Entry("BFD Enabled", true, []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, diff --git a/go-controller/pkg/ovn/external_ids_syncer/acl/acl_suite_test.go b/go-controller/pkg/ovn/external_ids_syncer/acl/acl_suite_test.go index b699917aed..3d4223369a 100644 --- a/go-controller/pkg/ovn/external_ids_syncer/acl/acl_suite_test.go +++ b/go-controller/pkg/ovn/external_ids_syncer/acl/acl_suite_test.go @@ -3,7 +3,7 @@ package acl_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/ovn/external_ids_syncer/acl/acl_sync.go b/go-controller/pkg/ovn/external_ids_syncer/acl/acl_sync.go index 861b0e5d46..2b2413bac1 100644 --- a/go-controller/pkg/ovn/external_ids_syncer/acl/acl_sync.go +++ b/go-controller/pkg/ovn/external_ids_syncer/acl/acl_sync.go @@ -131,7 +131,7 @@ func (syncer *ACLSyncer) SyncACLs(existingNodes []*v1.Node) error { // update acls with new ExternalIDs err = batching.Batch[*nbdb.ACL](syncer.txnBatchSize, uniquePrimaryIDACLs, func(batchACLs []*nbdb.ACL) error { - return libovsdbops.CreateOrUpdateACLs(syncer.nbClient, batchACLs...) + return libovsdbops.CreateOrUpdateACLs(syncer.nbClient, nil, batchACLs...) }) if err != nil { return fmt.Errorf("cannot update stale ACLs: %v", err) @@ -183,7 +183,7 @@ func (syncer *ACLSyncer) SyncACLs(existingNodes []*v1.Node) error { } // batch ACLs together in order of their priority: lowest first and then highest err = batching.Batch[*nbdb.ACL](syncer.txnBatchSize, aclsInTier0, func(batchACLs []*nbdb.ACL) error { - return libovsdbops.CreateOrUpdateACLs(syncer.nbClient, batchACLs...) + return libovsdbops.CreateOrUpdateACLs(syncer.nbClient, nil, batchACLs...) }) if err != nil { return fmt.Errorf("cannot update ACLs to tier2: %v", err) diff --git a/go-controller/pkg/ovn/external_ids_syncer/acl/acl_sync_test.go b/go-controller/pkg/ovn/external_ids_syncer/acl/acl_sync_test.go index b57fcd7fc9..dbba1910c5 100644 --- a/go-controller/pkg/ovn/external_ids_syncer/acl/acl_sync_test.go +++ b/go-controller/pkg/ovn/external_ids_syncer/acl/acl_sync_test.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" diff --git a/go-controller/pkg/ovn/external_ids_syncer/address_set/address_set_suite_test.go b/go-controller/pkg/ovn/external_ids_syncer/address_set/address_set_suite_test.go index 5159a898fc..2709d429fa 100644 --- a/go-controller/pkg/ovn/external_ids_syncer/address_set/address_set_suite_test.go +++ b/go-controller/pkg/ovn/external_ids_syncer/address_set/address_set_suite_test.go @@ -3,7 +3,7 @@ package address_set_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) diff --git a/go-controller/pkg/ovn/external_ids_syncer/address_set/address_set_sync.go b/go-controller/pkg/ovn/external_ids_syncer/address_set/address_set_sync.go index 197e7cc21c..0323282511 100644 --- a/go-controller/pkg/ovn/external_ids_syncer/address_set/address_set_sync.go +++ b/go-controller/pkg/ovn/external_ids_syncer/address_set/address_set_sync.go @@ -98,10 +98,11 @@ func checkIfNetpol(asName string) (netpolOwned bool, namespace, name, direction, return } -func (syncer *AddressSetsSyncer) getEgressIPAddrSetDbIDs(name string) *libovsdbops.DbObjectIDs { +func (syncer *AddressSetsSyncer) getEgressIPAddrSetDbIDs(name, network string) *libovsdbops.DbObjectIDs { return libovsdbops.NewDbObjectIDs(libovsdbops.AddressSetEgressIP, syncer.controllerName, map[libovsdbops.ExternalIDKey]string{ // egress ip creates cluster-wide address sets with egressIpAddrSetName libovsdbops.ObjectNameKey: name, + libovsdbops.NetworkKey: network, }) } @@ -196,9 +197,9 @@ func (syncer *AddressSetsSyncer) getReferencingObjsAndNewDbIDs(oldHash, oldName switch { // Filter address sets with pre-defined names case oldName == clusterNodeIP: - dbIDs = syncer.getEgressIPAddrSetDbIDs(nodeIPAddrSetName) + dbIDs = syncer.getEgressIPAddrSetDbIDs(nodeIPAddrSetName, "default") case oldName == egressIPServedPods: - dbIDs = syncer.getEgressIPAddrSetDbIDs(egressIPServedPodsAddrSetName) + dbIDs = syncer.getEgressIPAddrSetDbIDs(egressIPServedPodsAddrSetName, "default") case oldName == egressServiceServedPods: dbIDs = syncer.getEgressServiceAddrSetDbIDs() // HybridNodeRoute and EgressQoS address sets have specific prefixes diff --git a/go-controller/pkg/ovn/external_ids_syncer/address_set/address_set_sync_test.go b/go-controller/pkg/ovn/external_ids_syncer/address_set/address_set_sync_test.go index 49fe17fd79..cbec2a22a3 100644 --- a/go-controller/pkg/ovn/external_ids_syncer/address_set/address_set_sync_test.go +++ b/go-controller/pkg/ovn/external_ids_syncer/address_set/address_set_sync_test.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" @@ -542,12 +542,12 @@ var _ = ginkgo.Describe("OVN Address Set Syncer", func() { testData := []asSync{ { before: createInitialAS(asName1, []string{testIPv4}), - after: syncerToBuildData.getEgressIPAddrSetDbIDs(egressIPServedPodsAddrSetName), + after: syncerToBuildData.getEgressIPAddrSetDbIDs(egressIPServedPodsAddrSetName, "default"), addressSetFactoryIPID: ipv4AddressSetFactoryID, }, { before: createInitialAS(asName2, []string{testIPv4}), - after: syncerToBuildData.getEgressIPAddrSetDbIDs(nodeIPAddrSetName), + after: syncerToBuildData.getEgressIPAddrSetDbIDs(nodeIPAddrSetName, "default"), addressSetFactoryIPID: ipv4AddressSetFactoryID, }, { @@ -694,7 +694,7 @@ var _ = ginkgo.Describe("OVN Address Set Syncer", func() { }, { before: createInitialAS(asName2, []string{testIPv4}), - after: syncerToBuildData.getEgressIPAddrSetDbIDs(nodeIPAddrSetName), + after: syncerToBuildData.getEgressIPAddrSetDbIDs(nodeIPAddrSetName, "default"), addressSetFactoryIPID: ipv4AddressSetFactoryID, }, { diff --git a/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_suite_test.go b/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_suite_test.go new file mode 100644 index 0000000000..64c0d0f9f6 --- /dev/null +++ 
b/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_suite_test.go @@ -0,0 +1,13 @@ +package logical_router_policy + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestLRPSuite(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "LRP Suite") +} diff --git a/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go b/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go new file mode 100644 index 0000000000..0798d170fb --- /dev/null +++ b/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go @@ -0,0 +1,463 @@ +package logical_router_policy + +import ( + "errors" + "fmt" + "net" + "strings" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/batching" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + "k8s.io/klog/v2" + utilsnet "k8s.io/utils/net" +) + +type LRPSyncer struct { + nbClient libovsdbclient.Client + controllerName string +} + +type egressIPNoReroutePolicyName string // the following are redefined from the ovn pkg to prevent a circular import +type egressIPFamilyValue string + +var ( + replyTrafficNoReroute egressIPNoReroutePolicyName = "EIP-No-Reroute-reply-traffic" + noReRoutePodToPod egressIPNoReroutePolicyName = "EIP-No-Reroute-Pod-To-Pod" + noReRoutePodToJoin egressIPNoReroutePolicyName = "EIP-No-Reroute-Pod-To-Join" + NoReRoutePodToNode egressIPNoReroutePolicyName = "EIP-No-Reroute-Pod-To-Node" + v4IPFamilyValue egressIPFamilyValue = "ip4" + v6IPFamilyValue egressIPFamilyValue = "ip6" + ipFamilyValue egressIPFamilyValue = "ip" // use it when it's dualstack + defaultNetworkName = "default" +) + +// NewLRPSyncer adds owner references to a limited subset of LRPs. 
controllerName is the name of the new controller that should own all LRPs without a controller owner reference +func NewLRPSyncer(nbClient libovsdbclient.Client, controllerName string) *LRPSyncer { + return &LRPSyncer{ + nbClient: nbClient, + controllerName: controllerName, + } +} + +func (syncer *LRPSyncer) Sync() error { + v4JoinSubnet, v6JoinSubnet := getJoinSubnets() + v4ClusterSubnets, v6ClusterSubnets := getClusterSubnets() + if (len(v4ClusterSubnets) != 0 && v4JoinSubnet == nil) || (v4JoinSubnet != nil && len(v4ClusterSubnets) == 0) { + return fmt.Errorf("invalid IPv4 config - v4 cluster and join subnet must be valid") + } + if (len(v6ClusterSubnets) != 0 && v6JoinSubnet == nil) || (v6JoinSubnet != nil && len(v6ClusterSubnets) == 0) { + return fmt.Errorf("invalid IPv6 config - v6 cluster and join subnet must be valid") + } + if len(v4ClusterSubnets) == 0 && len(v6ClusterSubnets) == 0 { + return fmt.Errorf("IPv4 or IPv6 cluster subnets must be defined") + } + if err := syncer.syncEgressIPNoReroutes(v4ClusterSubnets, v6ClusterSubnets, v4JoinSubnet, v6JoinSubnet); err != nil { + return fmt.Errorf("failed to sync Logical Router Policies no reroutes: %v", err) + } + if err := syncer.syncEgressIPReRoutes(); err != nil { + return fmt.Errorf("failed to sync Logical Router Policies reroutes: %v", err) + } + return nil +} + +func (syncer *LRPSyncer) syncEgressIPReRoutes() error { + v4PodsNetInfo, v6PodsNetInfo, err := syncer.buildCDNPodCache() + if err != nil { + return fmt.Errorf("failed to build CDN pod cache from NB DB: %v", err) + } + noOwnerFn := libovsdbops.GetNoOwnerPredicate[*nbdb.LogicalRouterPolicy]() + p := func(item *nbdb.LogicalRouterPolicy) bool { + return item.Priority == ovntypes.EgressIPReroutePriority && noOwnerFn(item) && item.Match != "" && item.ExternalIDs["name"] != "" + } + lrpList, err := libovsdbops.FindALogicalRouterPoliciesWithPredicate(syncer.nbClient, ovntypes.OVNClusterRouter, p) + if err != nil { + if errors.Is(err, libovsdbclient.ErrNotFound) { + return nil + } + return fmt.Errorf("failed to find Logical Router Policies for %s: %v", ovntypes.OVNClusterRouter, err) + } + + err = batching.Batch[*nbdb.LogicalRouterPolicy](50, lrpList, func(batchLRPs []*nbdb.LogicalRouterPolicy) error { + var ops []libovsdb.Operation + for _, lrp := range batchLRPs { + eipName := lrp.ExternalIDs["name"] + podIP := extractIPFromEIPReRouteMatch(lrp.Match) + if podIP == nil { + return fmt.Errorf("failed to extract IP from LRP: %v", lrp) + } + isIPv6 := utilsnet.IsIPv6(podIP) + cache := v4PodsNetInfo + if isIPv6 { + cache = v6PodsNetInfo + } + podInfo, err := cache.getPod(podIP) + if err != nil { + klog.Infof("Failed to find Logical Switch Port cache entry for pod IP %s: %v", podIP.String(), err) + continue + } + ipFamily := getIPFamily(isIPv6) + lrp.ExternalIDs = getEgressIPLRPReRouteDbIDs(eipName, podInfo.namespace, podInfo.name, ipFamily, defaultNetworkName, syncer.controllerName).GetExternalIDs() + ops, err = libovsdbops.UpdateLogicalRouterPoliciesOps(syncer.nbClient, ops, lrp) + if err != nil { + return fmt.Errorf("failed to create logical router policy update ops: %v", err) + } + } + _, err = libovsdbops.TransactAndCheck(syncer.nbClient, ops) + if err != nil { + return fmt.Errorf("failed to transact EgressIP LRP reroute sync ops: %v", err) + } + return nil + }) + return err +} + +type podNetInfo struct { + ip net.IP + namespace string + name string +} + +type podsNetInfo []podNetInfo + +func (ps podsNetInfo) getPod(ip net.IP) (podNetInfo, error) { + for _, p := range ps { + if p.ip.Equal(ip) { + 
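// Reviewer note (not part of the patch): the reroute LRPs synced above carry a
// match of the form "ip4.src == 10.244.2.5" (or the ip6 equivalent), which is
// why extractIPFromEIPReRouteMatch later in this file splits the match on
// spaces and parses the third token as the pod IP.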
return p, nil + } + } + return podNetInfo{}, fmt.Errorf("failed to find a pod matching IP %v", ip) +} + +func (syncer *LRPSyncer) buildCDNPodCache() (podsNetInfo, podsNetInfo, error) { + p := func(item *nbdb.LogicalSwitchPort) bool { + return item.ExternalIDs["pod"] == "true" && item.ExternalIDs[ovntypes.NADExternalID] == "" // ignore secondary network LSPs + } + lsps, err := libovsdbops.FindLogicalSwitchPortWithPredicate(syncer.nbClient, p) + if err != nil { + return nil, nil, fmt.Errorf("failed to get logical switch ports: %v", err) + } + v4Pods, v6Pods := make(podsNetInfo, 0), make(podsNetInfo, 0) + for _, lsp := range lsps { + namespaceName, podName := util.GetNamespacePodFromCDNPortName(lsp.Name) + if namespaceName == "" || podName == "" { + klog.Errorf("Failed to extract namespace / pod from logical switch port %s", lsp.Name) + continue + } + if len(lsp.Addresses) == 0 { + klog.Errorf("Address(es) not set for pod %s/%s", namespaceName, podName) + continue + } + for i := 1; i < len(lsp.Addresses); i++ { + // CIDR is supported field within OVN, but for CDN we only set IP + ip := net.ParseIP(lsp.Addresses[i]) + if ip == nil { + klog.Errorf("Failed to extract IP %q from logical switch port for pod %s/%s", lsp.Addresses[i], namespaceName, podName) + continue + } + if utilsnet.IsIPv6(ip) { + v6Pods = append(v6Pods, podNetInfo{ip: ip, namespace: namespaceName, name: podName}) + } else { + v4Pods = append(v4Pods, podNetInfo{ip: ip, namespace: namespaceName, name: podName}) + } + } + } + + return v4Pods, v6Pods, nil +} + +// syncEgressIPNoReroutes syncs egress IP LRPs at priority 102 associated with CDN ovn_cluster_router and adds owner references. +// The aforementioned priority LRPs are used to skip egress IP for pod to pod, pod to join and pod to node traffic. +// There is also a no reroute reply traffic which ensures any traffic which is a response/reply from the egressIP pods +// will not be re-routed to egress-nodes. This LRP already contains owner references and does not need to be sync'd. +// Sample of the 3 IPv4 LRPs before sync that do not contain owner references: +// pod to pod: +// _uuid : 3ef68e05-8ed6-4cb7-847a-39dd1b190804 +// action : allow +// bfd_sessions : [] +// external_ids : {} +// match : "ip4.src == 10.244.0.0/16 && ip4.dst == 10.244.0.0/16" +// nexthop : [] +// nexthops : [] +// options : {} +// priority : 102 +// pod to join: +// _uuid : c7341960-188d-465c-9c39-020af9e60033 +// action : allow +// bfd_sessions : [] +// external_ids : {} +// match : "ip4.src == 10.244.0.0/16 && ip4.dst == 100.64.0.0/16" +// nexthop : [] +// nexthops : [] +// options : {} +// priority : 102 +// pod to node +// _uuid : 59b61efc-2a60-4fcc-ae60-391c3d6f5c40 +// action : allow +// bfd_sessions : [] +// external_ids : {} +// match : "(ip4.src == $a4548040316634674295 || ip4.src == $a13607449821398607916) && ip4.dst == $a14918748166599097711" +// nexthop : [] +// nexthops : [] +// options : {pkt_mark="1008"} +// priority : 102 +// +// Following sync, owner references are added. 
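// Reviewer note (not part of the patch): after the sync, the same pod-to-node
// LRP would carry owner references along these lines (values illustrative,
// assuming the default network and the default controller name; exact key
// serialization follows libovsdbops.DbObjectIDs):
// _uuid               : 59b61efc-2a60-4fcc-ae60-391c3d6f5c40
// match               : "(ip4.src == $a4548040316634674295 || ip4.src == $a13607449821398607916) && ip4.dst == $a14918748166599097711"
// options             : {pkt_mark="1008"}
// priority            : 102
// external_ids        : {"k8s.ovn.org/id"="default-network-controller:EgressIP:EIP-No-Reroute-Pod-To-Node:102:ip4:default", "k8s.ovn.org/name"=EIP-No-Reroute-Pod-To-Node, "k8s.ovn.org/owner-controller"=default-network-controller, "k8s.ovn.org/owner-type"=EgressIP, ip-family=ip4, network=default, priority="102"}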
+func (syncer *LRPSyncer) syncEgressIPNoReroutes(v4ClusterSubnets, v6ClusterSubnets []*net.IPNet, v4JoinSubnet, v6JoinSubnet *net.IPNet) error { + if err := syncer.syncEgressIPNoReroutePodToJoin(v4ClusterSubnets, v6ClusterSubnets, v4JoinSubnet, v6JoinSubnet); err != nil { + return fmt.Errorf("failed to sync pod to join subnets: %v", err) + } + if err := syncer.syncEgressIPNoReroutePodToPod(v4ClusterSubnets, v6ClusterSubnets); err != nil { + return fmt.Errorf("failed to sync pod to pod subnets: %v", err) + } + if err := syncer.syncEgressIPNoReroutePodToNode(); err != nil { + return fmt.Errorf("failed to sync pod to node subnets: %v", err) + } + + return nil +} + +func (syncer *LRPSyncer) syncEgressIPNoReroutePodToJoin(v4ClusterSubnets, v6ClusterSubnets []*net.IPNet, v4JoinSubnet, v6JoinSubnet *net.IPNet) error { + noOwnerFn := libovsdbops.GetNoOwnerPredicate[*nbdb.LogicalRouterPolicy]() + p := func(item *nbdb.LogicalRouterPolicy) bool { + return item.Priority == ovntypes.DefaultNoRereoutePriority && noOwnerFn(item) && item.Match != "" && !strings.Contains(item.Match, "$") + } + lrpList, err := libovsdbops.FindALogicalRouterPoliciesWithPredicate(syncer.nbClient, ovntypes.OVNClusterRouter, p) + if err != nil { + if errors.Is(err, libovsdbclient.ErrNotFound) { + return nil + } + return fmt.Errorf("failed to find Logical Router Policies for %s: %v", ovntypes.OVNClusterRouter, err) + } + var ops []libovsdb.Operation + for _, lrp := range lrpList { + ipNets := extractCIDRs(lrp.Match) + if len(ipNets) != 2 { + continue + } + clusterCIDR := ipNets[0] + joinCIDR := ipNets[1] + isIPV6 := utilsnet.IsIPv6CIDR(clusterCIDR) + ipFamily := getIPFamily(isIPV6) + if isIPV6 { + if !containsIPNet(v6ClusterSubnets, clusterCIDR) || !util.IsIPNetEqual(v6JoinSubnet, joinCIDR) { + continue + } + } else { + if !containsIPNet(v4ClusterSubnets, clusterCIDR) || !util.IsIPNetEqual(v4JoinSubnet, joinCIDR) { + continue + } + } + lrp.ExternalIDs = getEgressIPLRPNoReRoutePodToJoinDbIDs(ipFamily, defaultNetworkName, syncer.controllerName).GetExternalIDs() + ops, err = libovsdbops.UpdateLogicalRouterPoliciesOps(syncer.nbClient, ops, lrp) + if err != nil { + return fmt.Errorf("failed to create logical router policy update ops: %v", err) + } + } + _, err = libovsdbops.TransactAndCheck(syncer.nbClient, ops) + if err != nil { + return fmt.Errorf("failed to transact pod to join subnet sync ops: %v", err) + } + return nil +} + +func (syncer *LRPSyncer) syncEgressIPNoReroutePodToPod(v4ClusterSubnets, v6ClusterSubnets []*net.IPNet) error { + noOwnerFn := libovsdbops.GetNoOwnerPredicate[*nbdb.LogicalRouterPolicy]() + p := func(item *nbdb.LogicalRouterPolicy) bool { + return item.Priority == ovntypes.DefaultNoRereoutePriority && noOwnerFn(item) && item.Match != "" && !strings.Contains(item.Match, "$") + } + lrpList, err := libovsdbops.FindALogicalRouterPoliciesWithPredicate(syncer.nbClient, ovntypes.OVNClusterRouter, p) + if err != nil { + if errors.Is(err, libovsdbclient.ErrNotFound) { + return nil + } + return err + } + var ops []libovsdb.Operation + for _, lrp := range lrpList { + ipNets := extractCIDRs(lrp.Match) + if len(ipNets) != 2 { + continue + } + clusterCIDR1 := ipNets[0] + clusterCIDR2 := ipNets[1] + if !util.IsIPNetEqual(clusterCIDR1, clusterCIDR2) { + continue + } + isIPV6 := utilsnet.IsIPv6CIDR(clusterCIDR1) + var ipFamily egressIPFamilyValue + if isIPV6 { + ipFamily = v6IPFamilyValue + if !containsIPNet(v6ClusterSubnets, clusterCIDR1) { + continue + } + } else { + ipFamily = v4IPFamilyValue + if 
!containsIPNet(v4ClusterSubnets, clusterCIDR1) { + continue + } + } + lrp.ExternalIDs = getEgressIPLRPNoReRoutePodToPodDbIDs(ipFamily, defaultNetworkName, syncer.controllerName).GetExternalIDs() + ops, err = libovsdbops.UpdateLogicalRouterPoliciesOps(syncer.nbClient, ops, lrp) + if err != nil { + return fmt.Errorf("failed to create logical router policy ops: %v", err) + } + } + _, err = libovsdbops.TransactAndCheck(syncer.nbClient, ops) + if err != nil { + return fmt.Errorf("failed to transact pod to pod subnet sync ops: %v", err) + } + return nil +} + +func (syncer *LRPSyncer) syncEgressIPNoReroutePodToNode() error { + noOwnerFn := libovsdbops.GetNoOwnerPredicate[*nbdb.LogicalRouterPolicy]() + p := func(item *nbdb.LogicalRouterPolicy) bool { + return item.Priority == ovntypes.DefaultNoRereoutePriority && noOwnerFn(item) && strings.Contains(item.Match, "$") && item.Options["pkt_mark"] != "" + } + lrpList, err := libovsdbops.FindALogicalRouterPoliciesWithPredicate(syncer.nbClient, ovntypes.OVNClusterRouter, p) + if err != nil { + if errors.Is(err, libovsdbclient.ErrNotFound) { + return nil + } + return err + } + var ops []libovsdb.Operation + for _, lrp := range lrpList { + isIPV6 := strings.Contains(lrp.Match, string(v6IPFamilyValue)) + ipFamily := getIPFamily(isIPV6) + lrp.ExternalIDs = getEgressIPLRPNoReRoutePodToNodeDbIDs(ipFamily, defaultNetworkName, syncer.controllerName).GetExternalIDs() + ops, err = libovsdbops.UpdateLogicalRouterPoliciesOps(syncer.nbClient, ops, lrp) + if err != nil { + return fmt.Errorf("failed to create logical router policy update ops: %v", err) + } + } + _, err = libovsdbops.TransactAndCheck(syncer.nbClient, ops) + if err != nil { + return fmt.Errorf("failed to transact pod to node subnet sync ops: %v", err) + } + return nil +} + +func getJoinSubnets() (*net.IPNet, *net.IPNet) { + var v4JoinSubnet *net.IPNet + var v6JoinSubnet *net.IPNet + var err error + if config.IPv4Mode { + _, v4JoinSubnet, err = net.ParseCIDR(config.Gateway.V4JoinSubnet) + if err != nil { + klog.Errorf("Failed to parse IPv4 join subnet: %v", err) + } + } + if config.IPv6Mode { + _, v6JoinSubnet, err = net.ParseCIDR(config.Gateway.V6JoinSubnet) + if err != nil { + klog.Errorf("Failed to parse IPv6 join subnet: %v", err) + } + } + return v4JoinSubnet, v6JoinSubnet +} + +func getClusterSubnets() ([]*net.IPNet, []*net.IPNet) { + var v4ClusterSubnets []*net.IPNet + var v6ClusterSubnets []*net.IPNet + + for _, subnet := range config.Default.ClusterSubnets { + if utilsnet.IsIPv6CIDR(subnet.CIDR) { + if config.IPv6Mode { + v6ClusterSubnets = append(v6ClusterSubnets, subnet.CIDR) + } + } else { + if config.IPv4Mode { + v4ClusterSubnets = append(v4ClusterSubnets, subnet.CIDR) + } + } + } + return v4ClusterSubnets, v6ClusterSubnets +} + +func getEgressIPLRPReRouteDbIDs(egressIPName, podNamespace, podName string, ipFamily egressIPFamilyValue, network, controller string) *libovsdbops.DbObjectIDs { + return libovsdbops.NewDbObjectIDs(libovsdbops.LogicalRouterPolicyEgressIP, controller, map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: fmt.Sprintf("%s_%s/%s", egressIPName, podNamespace, podName), + libovsdbops.PriorityKey: fmt.Sprintf("%d", ovntypes.EgressIPReroutePriority), + libovsdbops.IPFamilyKey: string(ipFamily), + libovsdbops.NetworkKey: network, + }) +} + +func getEgressIPLRPNoReRoutePodToJoinDbIDs(ipFamily egressIPFamilyValue, network, controller string) *libovsdbops.DbObjectIDs { + return libovsdbops.NewDbObjectIDs(libovsdbops.LogicalRouterPolicyEgressIP, controller,
map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: string(noReRoutePodToJoin), + libovsdbops.PriorityKey: fmt.Sprintf("%d", ovntypes.DefaultNoRereoutePriority), + libovsdbops.IPFamilyKey: string(ipFamily), + libovsdbops.NetworkKey: network, + }) +} + +func getEgressIPLRPNoReRoutePodToPodDbIDs(ipFamily egressIPFamilyValue, network, controller string) *libovsdbops.DbObjectIDs { + return libovsdbops.NewDbObjectIDs(libovsdbops.LogicalRouterPolicyEgressIP, controller, map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: string(noReRoutePodToPod), + libovsdbops.PriorityKey: fmt.Sprintf("%d", ovntypes.DefaultNoRereoutePriority), + libovsdbops.IPFamilyKey: string(ipFamily), + libovsdbops.NetworkKey: network, + }) +} + +func getEgressIPLRPNoReRoutePodToNodeDbIDs(ipFamily egressIPFamilyValue, network, controller string) *libovsdbops.DbObjectIDs { + return libovsdbops.NewDbObjectIDs(libovsdbops.LogicalRouterPolicyEgressIP, controller, map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: string(NoReRoutePodToNode), + libovsdbops.PriorityKey: fmt.Sprintf("%d", ovntypes.DefaultNoRereoutePriority), + libovsdbops.IPFamilyKey: string(ipFamily), + libovsdbops.NetworkKey: network, + }) +} + +func extractCIDRs(line string) []*net.IPNet { + ipNets := make([]*net.IPNet, 0) + strs := strings.Split(line, " ") + for _, str := range strs { + if !strings.Contains(str, "/") { + continue + } + _, ipNet, err := net.ParseCIDR(str) + if err != nil { + klog.Errorf("Failed to parse CIDR from string %q: %v", str, err) + continue + } + if ipNet != nil { + ipNets = append(ipNets, ipNet) + } + } + return ipNets +} + +func extractIPFromEIPReRouteMatch(match string) net.IP { + strs := strings.Split(match, " ") + var ip net.IP + if len(strs) != 3 { + return ip + } + return net.ParseIP(strs[2]) +} + +func containsIPNet(ipNets []*net.IPNet, candidate *net.IPNet) bool { + for _, ipNet := range ipNets { + if util.IsIPNetEqual(ipNet, candidate) { + return true + } + } + return false +} + +func getIPFamily(isIPv6 bool) egressIPFamilyValue { + if isIPv6 { + return v6IPFamilyValue + } + return v4IPFamilyValue +} diff --git a/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync_test.go b/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync_test.go new file mode 100644 index 0000000000..42396a7563 --- /dev/null +++ b/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync_test.go @@ -0,0 +1,561 @@ +package logical_router_policy + +import ( + "fmt" + "net" + "strings" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + + ginkgotable "github.com/onsi/ginkgo/extensions/table" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +type lrpSync struct { + initialLRPs []*nbdb.LogicalRouterPolicy + finalLRPs []*nbdb.LogicalRouterPolicy + v4ClusterSubnets []*net.IPNet + v6ClusterSubnets []*net.IPNet + v4JoinSubnet *net.IPNet + v6JoinSubnet *net.IPNet + pods podsNetInfo +} + +var _ = ginkgo.Describe("OVN Logical Router Syncer", func() { + const ( + egressIPName = "testeip" + v4PodClusterSubnetStr = "10.244.0.0/16" + podName = 
"pod" + podNamespace = "test" + v4PodIPStr = "10.244.0.5" + v6PodIPStr = "2001:db8:abcd:12::5" + pod2Name = "pod2" + pod2Namespace = "test2" + v4Pod2IPStr = "10.244.0.6" + v4PodNextHop = "100.64.0.2" + v6PodNextHop = "fe70::2" + v6PodClusterSubnetStr = "2001:db8:abcd:12::/64" + v4JoinSubnetStr = "100.64.0.0/16" + v6JoinSubnetStr = "fe80::/64" + v4PodToNodeMatch = "(ip4.src == $a4548040316634674295 || ip4.src == $a13607449821398607916) && ip4.dst == $a14918748166599097711" + v6PodToNodeMatch = "(ip6.src == $a4548040316634674295 || ip6.src == $a13607449821398607916) && ip6.dst == $a14918748166599097711" + defaultNetworkControllerName = "default-network-controller" + ) + + var ( + _, v4PodClusterSubnet, _ = net.ParseCIDR(v4PodClusterSubnetStr) + _, v6PodClusterSubnet, _ = net.ParseCIDR(v6PodClusterSubnetStr) + _, v4JoinSubnet, _ = net.ParseCIDR(v4JoinSubnetStr) + _, v6JoinSubnet, _ = net.ParseCIDR(v6JoinSubnetStr) + v4PodNextHops = []string{v4PodNextHop} + v6PodNextHops = []string{v6PodNextHop} + v4PodIP = net.ParseIP(v4PodIPStr) + v6PodIP = net.ParseIP(v6PodIPStr) + v4Pod2IP = net.ParseIP(v4Pod2IPStr) + v4PodNetInfo = podNetInfo{v4PodIP, podNamespace, podName} + v6PodNetInfo = podNetInfo{v6PodIP, podNamespace, podName} + v4Pod2NetInfo = podNetInfo{v4Pod2IP, pod2Namespace, pod2Name} + defaultNetwork = util.DefaultNetInfo{} + defaultNetworkName = defaultNetwork.GetNetworkName() + ) + + ginkgo.Context("EgressIP", func() { + ginkgotable.DescribeTable("reroutes", func(sync lrpSync) { + // pod reroutes may not have any owner references besides 'name' which equals EgressIP name + performTest(defaultNetworkControllerName, sync.initialLRPs, sync.finalLRPs, sync.v4ClusterSubnets, sync.v6ClusterSubnets, + sync.v4JoinSubnet, sync.v6JoinSubnet, sync.pods) + }, + ginkgotable.Entry("add reference to IPv4 LRP with no reference", lrpSync{ + initialLRPs: []*nbdb.LogicalRouterPolicy{getReRouteLRP(podNamespace, podName, v4PodIPStr, 0, v4IPFamilyValue, + v4PodNextHops, map[string]string{"name": egressIPName}, defaultNetworkControllerName)}, + finalLRPs: []*nbdb.LogicalRouterPolicy{getReRouteLRP(podNamespace, podName, v4PodIPStr, 0, v4IPFamilyValue, v4PodNextHops, + getEgressIPLRPReRouteDbIDs(egressIPName, podNamespace, podName, v4IPFamilyValue, defaultNetwork.GetNetworkName(), defaultNetworkControllerName).GetExternalIDs(), + defaultNetworkControllerName), + }, + v4ClusterSubnets: []*net.IPNet{v4PodClusterSubnet}, + v4JoinSubnet: v4JoinSubnet, + pods: podsNetInfo{v4PodNetInfo, v6PodNetInfo, v4Pod2NetInfo}, + }), + ginkgotable.Entry("add reference to IPv6 LRP with no reference", lrpSync{ + initialLRPs: []*nbdb.LogicalRouterPolicy{getReRouteLRP(podNamespace, podName, v6PodIPStr, 0, v6IPFamilyValue, + v6PodNextHops, map[string]string{"name": egressIPName}, defaultNetworkControllerName)}, + finalLRPs: []*nbdb.LogicalRouterPolicy{getReRouteLRP(podNamespace, podName, v6PodIPStr, 0, v6IPFamilyValue, v6PodNextHops, + getEgressIPLRPReRouteDbIDs(egressIPName, podNamespace, podName, v6IPFamilyValue, defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + defaultNetworkControllerName)}, + v6ClusterSubnets: []*net.IPNet{v6PodClusterSubnet}, + v6JoinSubnet: v6JoinSubnet, + pods: podsNetInfo{v4PodNetInfo, v6PodNetInfo, v4Pod2NetInfo}, + }), + ginkgotable.Entry("add references to IPv4 & IPv6 (dual) LRP with no references", lrpSync{ + initialLRPs: []*nbdb.LogicalRouterPolicy{ + getReRouteLRP(podNamespace, podName, v4PodIPStr, 0, v4IPFamilyValue, v4PodNextHops, map[string]string{"name": egressIPName}, 
defaultNetworkControllerName), + getReRouteLRP(podNamespace, podName, v6PodIPStr, 0, v6IPFamilyValue, v6PodNextHops, map[string]string{"name": egressIPName}, defaultNetworkControllerName)}, + finalLRPs: []*nbdb.LogicalRouterPolicy{ + getReRouteLRP(podNamespace, podName, v4PodIPStr, 0, v4IPFamilyValue, v4PodNextHops, + getEgressIPLRPReRouteDbIDs(egressIPName, podNamespace, podName, v4IPFamilyValue, defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), defaultNetworkControllerName), + getReRouteLRP(podNamespace, podName, v6PodIPStr, 0, v6IPFamilyValue, v6PodNextHops, + getEgressIPLRPReRouteDbIDs(egressIPName, podNamespace, podName, v6IPFamilyValue, defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), defaultNetworkControllerName)}, + v4ClusterSubnets: []*net.IPNet{v4PodClusterSubnet}, + v4JoinSubnet: v4JoinSubnet, + v6ClusterSubnets: []*net.IPNet{v6PodClusterSubnet}, + v6JoinSubnet: v6JoinSubnet, + pods: podsNetInfo{v4PodNetInfo, v6PodNetInfo, v4Pod2NetInfo}, + }), + ginkgotable.Entry("does not modify IPv4 LRP which contains a reference", lrpSync{ + initialLRPs: []*nbdb.LogicalRouterPolicy{getReRouteLRP(podNamespace, podName, v4PodIPStr, 0, v4IPFamilyValue, v4PodNextHops, + getEgressIPLRPReRouteDbIDs(egressIPName, podNamespace, podName, v4IPFamilyValue, defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + defaultNetworkControllerName)}, + finalLRPs: []*nbdb.LogicalRouterPolicy{getReRouteLRP(podNamespace, podName, v4PodIPStr, 0, v4IPFamilyValue, v4PodNextHops, + getEgressIPLRPReRouteDbIDs(egressIPName, podNamespace, podName, v4IPFamilyValue, defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + defaultNetworkControllerName)}, + v4ClusterSubnets: []*net.IPNet{v4PodClusterSubnet}, + v4JoinSubnet: v4JoinSubnet, + pods: podsNetInfo{v4PodNetInfo}, + }), + ginkgotable.Entry("does not return error when unable to build a reference because of failed pod IP lookup", lrpSync{ + initialLRPs: []*nbdb.LogicalRouterPolicy{getReRouteLRP(podNamespace, podName, v4PodIPStr, 0, v4IPFamilyValue, v4PodNextHops, + map[string]string{"name": egressIPName}, + defaultNetworkControllerName)}, + finalLRPs: []*nbdb.LogicalRouterPolicy{getReRouteLRP(podNamespace, podName, v4PodIPStr, 0, v4IPFamilyValue, v4PodNextHops, + map[string]string{"name": egressIPName}, + defaultNetworkControllerName)}, + v4ClusterSubnets: []*net.IPNet{v4PodClusterSubnet}, + v4JoinSubnet: v4JoinSubnet, + pods: podsNetInfo{}, + }), + ) + + ginkgotable.DescribeTable("pod to join, pod to pod, pod to node aka 'no reroutes'", func(sync lrpSync) { + // pod to join LRPs may not have any owner references specified + performTest(defaultNetworkControllerName, sync.initialLRPs, sync.finalLRPs, sync.v4ClusterSubnets, sync.v6ClusterSubnets, + sync.v4JoinSubnet, sync.v6JoinSubnet, nil) + }, + ginkgotable.Entry("does not modify LRP with owner references", lrpSync{ + initialLRPs: []*nbdb.LogicalRouterPolicy{getNoReRouteLRP(fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4PodClusterSubnetStr, v4JoinSubnetStr), + getEgressIPLRPNoReRoutePodToJoinDbIDs(v4IPFamilyValue, defaultNetworkName, "controller").GetExternalIDs(), nil), + }, + finalLRPs: []*nbdb.LogicalRouterPolicy{getNoReRouteLRP(fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4PodClusterSubnetStr, v4JoinSubnetStr), + getEgressIPLRPNoReRoutePodToJoinDbIDs(v4IPFamilyValue, defaultNetworkName, "controller").GetExternalIDs(), + nil), + }, + v4ClusterSubnets: []*net.IPNet{v4PodClusterSubnet}, + v4JoinSubnet: v4JoinSubnet, + }), + ginkgotable.Entry("updates 
IPv4 pod to pod, pod to join, pod to node with no references and does not modify LRP with references", lrpSync{ + initialLRPs: []*nbdb.LogicalRouterPolicy{ + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4PodClusterSubnetStr, v4PodClusterSubnetStr), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4PodClusterSubnetStr, v4JoinSubnetStr), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("pkt.mark == %d", types.EgressIPReplyTrafficConnectionMark), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "egressip-no-reroute-reply-traffic-UUID", + ExternalIDs: getEgressIPLRPNoReRouteDbIDs(types.DefaultNoRereoutePriority, string(replyTrafficNoReroute), string(ipFamilyValue), defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: v4PodToNodeMatch, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "egressip-no-reroute-pod-node-UUID", + Options: map[string]string{"pkt_mark": "1008"}, + }, + }, + finalLRPs: []*nbdb.LogicalRouterPolicy{ + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4PodClusterSubnetStr, v4PodClusterSubnetStr), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(v4IPFamilyValue, defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4PodClusterSubnetStr, v4JoinSubnetStr), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(v4IPFamilyValue, defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("pkt.mark == %d", types.EgressIPReplyTrafficConnectionMark), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "egressip-no-reroute-reply-traffic-UUID", + ExternalIDs: getEgressIPLRPNoReRouteDbIDs(types.DefaultNoRereoutePriority, string(replyTrafficNoReroute), string(ipFamilyValue), defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: v4PodToNodeMatch, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "egressip-no-reroute-pod-node-UUID", + Options: map[string]string{"pkt_mark": "1008"}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(v4IPFamilyValue, defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + }, + }, + v4ClusterSubnets: []*net.IPNet{v4PodClusterSubnet}, + v4JoinSubnet: v4JoinSubnet, + v6ClusterSubnets: []*net.IPNet{v6PodClusterSubnet}, + v6JoinSubnet: v6JoinSubnet, + }), + ginkgotable.Entry("updates IPv6 pod to pod, pod to join with no references and does not modify LRP with references", lrpSync{ + initialLRPs: []*nbdb.LogicalRouterPolicy{ + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip6.src == %s && ip6.dst == %s", v6PodClusterSubnetStr, v6PodClusterSubnetStr), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip6.src == %s && ip6.dst == 
%s", v6PodClusterSubnetStr, v6JoinSubnetStr), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("pkt.mark == %d", types.EgressIPReplyTrafficConnectionMark), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "egressip-no-reroute-reply-traffic", + ExternalIDs: getEgressIPLRPNoReRouteDbIDs(types.DefaultNoRereoutePriority, string(replyTrafficNoReroute), string(ipFamilyValue), defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: v6PodToNodeMatch, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "egressip-no-reroute-pod-node-UUID", + Options: map[string]string{"pkt_mark": "1008"}, + }, + }, + finalLRPs: []*nbdb.LogicalRouterPolicy{ + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip6.src == %s && ip6.dst == %s", v6PodClusterSubnetStr, v6PodClusterSubnetStr), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(v6IPFamilyValue, defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip6.src == %s && ip6.dst == %s", v6PodClusterSubnetStr, v6JoinSubnetStr), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(v6IPFamilyValue, defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("pkt.mark == %d", types.EgressIPReplyTrafficConnectionMark), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "egressip-no-reroute-reply-traffic", + ExternalIDs: getEgressIPLRPNoReRouteDbIDs(types.DefaultNoRereoutePriority, string(replyTrafficNoReroute), string(ipFamilyValue), defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: v6PodToNodeMatch, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "egressip-no-reroute-pod-node-UUID", + Options: map[string]string{"pkt_mark": "1008"}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(v6IPFamilyValue, defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + }, + }, + v6ClusterSubnets: []*net.IPNet{v6PodClusterSubnet}, + v6JoinSubnet: v6JoinSubnet, + }), + ginkgotable.Entry("updates IPv4 & IPv6 pod to pod, pod to join with no references and does not modify LRP with references", lrpSync{ + initialLRPs: []*nbdb.LogicalRouterPolicy{ + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4PodClusterSubnetStr, v4PodClusterSubnetStr), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "v4-default-no-reroute-UUID", + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4PodClusterSubnetStr, v4JoinSubnetStr), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "v4-no-reroute-service-UUID", + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: v4PodToNodeMatch, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "v4-egressip-no-reroute-pod-node-UUID", + Options: map[string]string{"pkt_mark": "1008"}, + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip6.src == %s && ip6.dst == %s", v6PodClusterSubnetStr, v6PodClusterSubnetStr), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: 
"v6-default-no-reroute-UUID", + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip6.src == %s && ip6.dst == %s", v6PodClusterSubnetStr, v6JoinSubnetStr), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "v6-no-reroute-service-UUID", + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: v6PodToNodeMatch, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "v6-egressip-no-reroute-pod-node-UUID", + Options: map[string]string{"pkt_mark": "1008"}, + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("pkt.mark == %d", types.EgressIPReplyTrafficConnectionMark), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "egressip-no-reroute-reply-traffic", + ExternalIDs: getEgressIPLRPNoReRouteDbIDs(types.DefaultNoRereoutePriority, string(replyTrafficNoReroute), string(ipFamilyValue), defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + }, + }, + finalLRPs: []*nbdb.LogicalRouterPolicy{ + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4PodClusterSubnetStr, v4PodClusterSubnetStr), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "v4-default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(v4IPFamilyValue, defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4PodClusterSubnetStr, v4JoinSubnetStr), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "v4-no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(v4IPFamilyValue, defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: v4PodToNodeMatch, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "v4-egressip-no-reroute-pod-node-UUID", + Options: map[string]string{"pkt_mark": "1008"}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(v4IPFamilyValue, defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip6.src == %s && ip6.dst == %s", v6PodClusterSubnetStr, v6PodClusterSubnetStr), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "v6-default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(v6IPFamilyValue, defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip6.src == %s && ip6.dst == %s", v6PodClusterSubnetStr, v6JoinSubnetStr), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "v6-no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(v6IPFamilyValue, defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: v6PodToNodeMatch, + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "v6-egressip-no-reroute-pod-node-UUID", + Options: map[string]string{"pkt_mark": "1008"}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(v6IPFamilyValue, defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + }, + { + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("pkt.mark == %d", types.EgressIPReplyTrafficConnectionMark), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "egressip-no-reroute-reply-traffic", + ExternalIDs: getEgressIPLRPNoReRouteDbIDs(types.DefaultNoRereoutePriority, string(replyTrafficNoReroute), string(ipFamilyValue), 
defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), + }, + }, + v4ClusterSubnets: []*net.IPNet{v4PodClusterSubnet}, + v4JoinSubnet: v4JoinSubnet, + v6ClusterSubnets: []*net.IPNet{v6PodClusterSubnet}, + v6JoinSubnet: v6JoinSubnet, + }), + ) + }) +}) + +func performTest(controllerName string, initialLRPs, finalLRPs []*nbdb.LogicalRouterPolicy, v4Cluster, v6Cluster []*net.IPNet, v4Join, v6Join *net.IPNet, pods podsNetInfo) { + // build LSPs + var lspDB []libovsdbtest.TestData + podToIPs := map[string][]string{} + for _, pod := range pods { + key := composeNamespaceName(pod.namespace, pod.name) + entry, ok := podToIPs[key] + if !ok { + entry = []string{} + } + podToIPs[key] = append(entry, pod.ip.String()) + } + var lspUUIDs []string + for namespaceName, ips := range podToIPs { + namespace, name := decomposeNamespaceName(namespaceName) + lsp := getLSP(namespace, name, ips...) + lspUUIDs = append(lspUUIDs, lsp.UUID) + lspDB = addLSPToTestData(lspDB, lsp) + } + // build switch + ls := getLS(lspUUIDs) + // build cluster router + clusterRouter := getClusterRouter(getLRPUUIDs(initialLRPs)...) + // build initial DB + initialDB := addLSsToTestData([]libovsdbtest.TestData{clusterRouter}, ls) + initialDB = append(initialDB, lspDB...) + initialDB = addLRPsToTestData(initialDB, initialLRPs...) + + // build expected DB + expectedDB := addLSsToTestData([]libovsdbtest.TestData{clusterRouter}, ls) + expectedDB = addLRPsToTestData(expectedDB, finalLRPs...) + expectedDB = append(expectedDB, lspDB...) + + dbSetup := libovsdbtest.TestSetup{NBData: initialDB} + nbClient, cleanup, err := libovsdbtest.NewNBTestHarness(dbSetup, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer cleanup.Cleanup() + + syncer := NewLRPSyncer(nbClient, controllerName) + config.IPv4Mode = false + config.IPv6Mode = false + config.Gateway.V4JoinSubnet = "" + config.Gateway.V6JoinSubnet = "" + config.Default.ClusterSubnets = []config.CIDRNetworkEntry{} + + if len(v4Cluster) != 0 && v4Join != nil { + config.IPv4Mode = true + config.Gateway.V4JoinSubnet = v4Join.String() + for _, clusterSubnet := range v4Cluster { + config.Default.ClusterSubnets = append(config.Default.ClusterSubnets, config.CIDRNetworkEntry{CIDR: clusterSubnet}) + } + } + if len(v6Cluster) != 0 && v6Join != nil { + config.IPv6Mode = true + config.Gateway.V6JoinSubnet = v6Join.String() + for _, clusterSubnet := range v6Cluster { + config.Default.ClusterSubnets = append(config.Default.ClusterSubnets, config.CIDRNetworkEntry{CIDR: clusterSubnet}) + } + } + gomega.Expect(syncer.Sync()).Should(gomega.Succeed()) + gomega.Eventually(nbClient).Should(libovsdbtest.HaveData(expectedDB)) +} + +func getNoReRouteLRP(match string, externalIDs map[string]string, options map[string]string) *nbdb.LogicalRouterPolicy { + return &nbdb.LogicalRouterPolicy{ + UUID: "test-lrp", + Action: nbdb.LogicalRouterPolicyActionAllow, + Match: match, + Priority: types.DefaultNoRereoutePriority, + ExternalIDs: externalIDs, + Options: options, + } +} + +func getReRouteLRP(podNamespace, podName, podIP string, mark int, ipFamily egressIPFamilyValue, nextHops []string, + externalIDs map[string]string, controller string) *nbdb.LogicalRouterPolicy { + lrp := &nbdb.LogicalRouterPolicy{ + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("%s.src == %s", ipFamily, podIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: nextHops, + ExternalIDs: externalIDs, + UUID: getReRoutePolicyUUID(podNamespace, podName, ipFamily, controller), + } + if mark != 0 {
lrp.Options = getMarkOptions(mark) + } + return lrp +} + +func getReRoutePolicyUUID(podNamespace, podName string, ipFamily egressIPFamilyValue, controller string) string { + return fmt.Sprintf("%s-reroute-%s-%s-%s", controller, podNamespace, podName, ipFamily) +} + +func getMarkOptions(mark int) map[string]string { + return map[string]string{"pkt_mark": fmt.Sprintf("%d", mark)} +} + +func getEgressIPLRPNoReRouteDbIDs(priority int, uniqueName, ipFamily, network, controller string) *libovsdbops.DbObjectIDs { + return libovsdbops.NewDbObjectIDs(libovsdbops.LogicalRouterPolicyEgressIP, controller, map[libovsdbops.ExternalIDKey]string{ + // egress ip creates global no-reroute policies at 102 priority + libovsdbops.ObjectNameKey: uniqueName, + libovsdbops.PriorityKey: fmt.Sprintf("%d", priority), + libovsdbops.IPFamilyKey: ipFamily, + libovsdbops.NetworkKey: network, + }) +} + +func getClusterRouter(policies ...string) *nbdb.LogicalRouter { + if policies == nil { + policies = make([]string, 0) + } + return &nbdb.LogicalRouter{ + UUID: types.OVNClusterRouter + "-UUID", + Name: types.OVNClusterRouter, + Policies: policies, + } +} + +func getLS(ports []string) *nbdb.LogicalSwitch { + return &nbdb.LogicalSwitch{ + UUID: "switch-UUID", + Name: "switch", + Ports: ports, + } +} + +func getLSP(podNamespace, podName string, ips ...string) *nbdb.LogicalSwitchPort { + return &nbdb.LogicalSwitchPort{ + UUID: fmt.Sprintf("%s-%s-UUID", podNamespace, podName), + Addresses: append([]string{"0a:58:0a:f4:00:04"}, ips...), + ExternalIDs: map[string]string{"namespace": podNamespace, "pod": "true"}, + Name: util.GetLogicalPortName(podNamespace, podName), + } +} + +func getLRPUUIDs(lrps []*nbdb.LogicalRouterPolicy) []string { + lrpUUIDs := make([]string, 0) + for _, lrp := range lrps { + lrpUUIDs = append(lrpUUIDs, lrp.UUID) + } + return lrpUUIDs +} + +func addLRPsToTestData(data []libovsdbtest.TestData, lrps ...*nbdb.LogicalRouterPolicy) []libovsdbtest.TestData { + for _, lrp := range lrps { + data = append(data, lrp) + } + return data +} + +func addLSsToTestData(data []libovsdbtest.TestData, lss ...*nbdb.LogicalSwitch) []libovsdbtest.TestData { + for _, ls := range lss { + data = append(data, ls) + } + return data +} + +func addLSPToTestData(data []libovsdbtest.TestData, lsps ...*nbdb.LogicalSwitchPort) []libovsdbtest.TestData { + for _, lsp := range lsps { + data = append(data, lsp) + } + return data +} + +func composeNamespaceName(namespace, name string) string { + return fmt.Sprintf("%s/%s", namespace, name) +} + +func decomposeNamespaceName(str string) (string, string) { + split := strings.Split(str, "/") + return split[0], split[1] +} diff --git a/go-controller/pkg/ovn/external_ids_syncer/nat/nat_suite_test.go b/go-controller/pkg/ovn/external_ids_syncer/nat/nat_suite_test.go new file mode 100644 index 0000000000..6edae51454 --- /dev/null +++ b/go-controller/pkg/ovn/external_ids_syncer/nat/nat_suite_test.go @@ -0,0 +1,13 @@ +package nat + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestPortGroup(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "NAT Suite") +} diff --git a/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go b/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go new file mode 100644 index 0000000000..38e2387551 --- /dev/null +++ b/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go @@ -0,0 +1,177 @@ +package nat + +import ( + "fmt" + "net" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + + "k8s.io/klog/v2" + utilsnet "k8s.io/utils/net" +) + +type NATSyncer struct { + nbClient libovsdbclient.Client + controllerName string +} + +type podNetInfo struct { + ip []net.IP + namespace string + name string +} + +type podsNetInfo []podNetInfo + +const legacyEIPNameExtIDKey = "name" + +// getPodByIP attempts to find a reference to a pod by IP address. It will return the info if found, +// along with boolean true, otherwise returned boolean will be false. +func (p podsNetInfo) getPodByIP(ip net.IP) (podNetInfo, bool) { + for _, pod := range p { + for _, podIP := range pod.ip { + if podIP.Equal(ip) { + return pod, true + } + } + } + return podNetInfo{}, false +} + +type egressIPFamilyValue string + +var ( + ipFamilyValueV4 egressIPFamilyValue = "ip4" + ipFamilyValueV6 egressIPFamilyValue = "ip6" +) + +// NewNATSyncer adds owner references to a limited subnet of LRPs. controllerName is the name of the new controller that should own all LRPs without controller +func NewNATSyncer(nbClient libovsdbclient.Client, controllerName string) *NATSyncer { + return &NATSyncer{ + nbClient: nbClient, + controllerName: controllerName, + } +} + +func (n *NATSyncer) Sync() error { + if err := n.syncEgressIPNATs(); err != nil { + return fmt.Errorf("failed to sync EgressIP NATs: %v", err) + } + return nil +} + +func (n *NATSyncer) syncEgressIPNATs() error { + v4PodCache, v6PodCache, err := n.buildPodCache() + if err != nil { + return fmt.Errorf("failed to build pod cache: %v", err) + } + noOwnerFn := libovsdbops.GetNoOwnerPredicate[*nbdb.NAT]() + p := func(item *nbdb.NAT) bool { + return noOwnerFn(item) && item.ExternalIDs[legacyEIPNameExtIDKey] != "" && item.Type == nbdb.NATTypeSNAT && item.LogicalIP != "" + } + nats, err := libovsdbops.FindNATsWithPredicate(n.nbClient, p) + if err != nil { + return fmt.Errorf("failed to retrieve OVN NATs: %v", err) + } + var ops []libovsdb.Operation + for _, nat := range nats { + eIPName := nat.ExternalIDs[legacyEIPNameExtIDKey] + if eIPName == "" { + klog.Errorf("Expected NAT %s to contain 'name' as a key within its external IDs", nat.UUID) + continue + } + podIP, _, err := net.ParseCIDR(nat.LogicalIP) + if err != nil { + klog.Errorf("Failed to process logical IP %q of NAT %s", nat.LogicalIP, nat.UUID) + continue + } + isV6 := utilsnet.IsIPv6(podIP) + var ipFamily egressIPFamilyValue + var pod podNetInfo + var found bool + if isV6 { + ipFamily = ipFamilyValueV6 + pod, found = v6PodCache.getPodByIP(podIP) + } else { + ipFamily = ipFamilyValueV4 + pod, found = v4PodCache.getPodByIP(podIP) + } + if !found { + klog.Errorf("Failed to find logical switch port that contains IP address %s", podIP.String()) + continue + } + nat.ExternalIDs = getEgressIPNATDbIDs(eIPName, 
pod.namespace, pod.name, ipFamily, n.controllerName).GetExternalIDs() + ops, err = libovsdbops.UpdateNATOps(n.nbClient, ops, nat) + if err != nil { + klog.Errorf("Failed to generate NAT ops for NAT %s: %v", nat.UUID, err) + } + } + + _, err = libovsdbops.TransactAndCheck(n.nbClient, ops) + if err != nil { + return fmt.Errorf("failed to transact NAT sync ops: %v", err) + } + return nil +} + +func (n *NATSyncer) buildPodCache() (podsNetInfo, podsNetInfo, error) { + p := func(item *nbdb.LogicalSwitchPort) bool { + return item.ExternalIDs["pod"] == "true" && item.ExternalIDs[ovntypes.NADExternalID] == "" // ignore secondary network LSPs + } + lsps, err := libovsdbops.FindLogicalSwitchPortWithPredicate(n.nbClient, p) + if err != nil { + return nil, nil, fmt.Errorf("failed to get logical switch ports: %v", err) + } + v4Pods, v6Pods := make(podsNetInfo, 0), make(podsNetInfo, 0) + for _, lsp := range lsps { + namespaceName, podName := util.GetNamespacePodFromCDNPortName(lsp.Name) + if namespaceName == "" || podName == "" { + klog.Errorf("Failed to extract namespace / pod from logical switch port %s", lsp.Name) + continue + } + if len(lsp.Addresses) == 0 { + klog.Errorf("Address(es) not set for pod %s/%s", namespaceName, podName) + continue + } + var ( + v4IPs []net.IP + v6IPs []net.IP + ) + for i := 1; i < len(lsp.Addresses); i++ { + // skip index 0, which holds the MAC address; CIDR is a supported field within OVN, but for CDN we only set the IP + ip := net.ParseIP(lsp.Addresses[i]) + if ip == nil { + klog.Errorf("Failed to extract IP %q from logical switch port for pod %s/%s", lsp.Addresses[i], namespaceName, podName) + continue + } + + if utilsnet.IsIPv6(ip) { + v6IPs = append(v6IPs, ip) + } else { + v4IPs = append(v4IPs, ip) + } + } + if len(v4IPs) > 0 { + v4Pods = append(v4Pods, podNetInfo{ip: v4IPs, namespace: namespaceName, name: podName}) + } + if len(v6IPs) > 0 { + v6Pods = append(v6Pods, podNetInfo{ip: v6IPs, namespace: namespaceName, name: podName}) + } + } + + return v4Pods, v6Pods, nil +} + +// getEgressIPNATDbIDs is copied from the ovn pkg to avoid a dependency +func getEgressIPNATDbIDs(eIPName, podNamespace, podName string, ipFamily egressIPFamilyValue, controller string) *libovsdbops.DbObjectIDs { + return libovsdbops.NewDbObjectIDs(libovsdbops.NATEgressIP, controller, map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: fmt.Sprintf("%s_%s/%s", eIPName, podNamespace, podName), + libovsdbops.IPFamilyKey: string(ipFamily), + }) +} diff --git a/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync_test.go b/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync_test.go new file mode 100644 index 0000000000..c3b04d4c22 --- /dev/null +++ b/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync_test.go @@ -0,0 +1,251 @@ +package nat + +import ( + "fmt" + "net" + "strings" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" + libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + + ginkgotable "github.com/onsi/ginkgo/extensions/table" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +type natSync struct { + initialNATs []*nbdb.NAT + finalNATs []*nbdb.NAT + pods podsNetInfo +} + +const ( + egressIPName = "eip1" + egressIP = "10.10.10.10" + nat1UUID = "nat-1-UUID" + nat2UUID = "nat-2-UUID" + pod1V4CIDRStr = "10.128.0.5/32"
pod1V6CIDRStr = "2001:0000:130F:0000:0000:09C0:876A:130B/128" + pod1Namespace = "ns1" + pod1Name = "pod1" + pod2V4CIDRStr = "10.128.0.6/32" + pod2V6CIDRStr = "2001:0000:130F:0000:0000:09C0:876A:130A/128" + pod2Namespace = "ns1" + pod2Name = "pod2" + defaultNetworkControllerName = "default-network-controller" +) + +var ( + pod1V4IPNet = testing.MustParseIPNet(pod1V4CIDRStr) + pod1V6IPNet = testing.MustParseIPNet(pod1V6CIDRStr) + pod2V4IPNet = testing.MustParseIPNet(pod2V4CIDRStr) + pod2V6IPNet = testing.MustParseIPNet(pod2V6CIDRStr) + legacyExtIDs = map[string]string{legacyEIPNameExtIDKey: egressIPName} + pod1V4ExtIDs = getEgressIPNATDbIDs(egressIPName, pod1Namespace, pod1Name, ipFamilyValueV4, defaultNetworkControllerName).GetExternalIDs() + pod1V6ExtIDs = getEgressIPNATDbIDs(egressIPName, pod1Namespace, pod1Name, ipFamilyValueV6, defaultNetworkControllerName).GetExternalIDs() +) + +var _ = ginkgo.Describe("NAT Syncer", func() { + + ginkgo.Context("EgressIP", func() { + + ginkgotable.DescribeTable("egress NATs", func(sync natSync) { + performTest(defaultNetworkControllerName, sync.initialNATs, sync.finalNATs, sync.pods) + }, ginkgotable.Entry("converts legacy IPv4 NATs", natSync{ + initialNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V4CIDRStr, egressIP, legacyExtIDs)}, + finalNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V4CIDRStr, egressIP, pod1V4ExtIDs)}, + pods: podsNetInfo{ + { + []net.IP{pod1V4IPNet.IP}, + pod1Namespace, + pod1Name, + }, + { + []net.IP{pod2V4IPNet.IP}, + pod2Namespace, + pod2Name, + }, + }, + }), + ginkgotable.Entry("converts legacy IPv6 NATs", natSync{ + initialNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V6CIDRStr, egressIP, legacyExtIDs)}, + finalNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V6CIDRStr, egressIP, pod1V6ExtIDs)}, + pods: podsNetInfo{ + { + []net.IP{pod1V6IPNet.IP}, + pod1Namespace, + pod1Name, + }, + { + []net.IP{pod2V6IPNet.IP}, + pod2Namespace, + pod2Name, + }, + }, + }), + ginkgotable.Entry("converts legacy dual stack NATs", natSync{ + initialNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V4CIDRStr, egressIP, legacyExtIDs), getSNAT(nat2UUID, pod1V6CIDRStr, egressIP, legacyExtIDs)}, + finalNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V4CIDRStr, egressIP, pod1V4ExtIDs), getSNAT(nat2UUID, pod1V6CIDRStr, egressIP, pod1V6ExtIDs)}, + pods: podsNetInfo{ + { + []net.IP{pod1V4IPNet.IP, pod1V6IPNet.IP}, + pod1Namespace, + pod1Name, + }, + { + []net.IP{pod2V4IPNet.IP, pod2V6IPNet.IP}, + pod2Namespace, + pod2Name, + }, + }, + }), + ginkgotable.Entry("doesn't alter NAT with correct external IDs", natSync{ + initialNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V6CIDRStr, egressIP, pod1V6ExtIDs)}, + finalNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V6CIDRStr, egressIP, pod1V6ExtIDs)}, + pods: podsNetInfo{ + { + []net.IP{pod1V4IPNet.IP, pod1V6IPNet.IP}, + pod1Namespace, + pod1Name, + }, + { + []net.IP{pod2V4IPNet.IP, pod2V6IPNet.IP}, + pod2Namespace, + pod2Name, + }, + }, + }), + ) + }) +}) + +func performTest(controllerName string, initialNATS, finalNATs []*nbdb.NAT, pods podsNetInfo) { + // build LSPs + var lspDB []libovsdbtest.TestData + podToIPs := map[string][]net.IP{} + for _, podInfo := range pods { + key := composeNamespaceName(podInfo.namespace, podInfo.name) + entry, ok := podToIPs[key] + if !ok { + entry = []net.IP{} + } + podToIPs[key] = append(entry, podInfo.ip...) + } + var lspUUIDs []string + for namespaceName, ips := range podToIPs { + namespace, name := decomposeNamespaceName(namespaceName) + lsp := getLSP(namespace, name, ips...) 
+ lspUUIDs = append(lspUUIDs, lsp.UUID) + lspDB = addLSPToTestData(lspDB, lsp) + } + // build switch + ls := getLS(lspUUIDs) + // build cluster router + router := getRouterWithNATs(getNATsUUIDs(initialNATS)...) + // build initial DB + initialDB := addLSsToTestData([]libovsdbtest.TestData{router}, ls) + initialDB = append(initialDB, lspDB...) + initialDB = addNATToTestData(initialDB, initialNATS...) + + // build expected DB + expectedDB := addLSsToTestData([]libovsdbtest.TestData{router}, ls) + expectedDB = addNATToTestData(expectedDB, finalNATs...) + expectedDB = append(expectedDB, lspDB...) + + dbSetup := libovsdbtest.TestSetup{NBData: initialDB} + nbClient, cleanup, err := libovsdbtest.NewNBTestHarness(dbSetup, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer cleanup.Cleanup() + + syncer := NewNATSyncer(nbClient, controllerName) + config.IPv4Mode = false + config.IPv6Mode = false + + gomega.Expect(syncer.Sync()).Should(gomega.Succeed()) + gomega.Eventually(nbClient).Should(libovsdbtest.HaveData(expectedDB)) +} + +func getRouterWithNATs(policies ...string) *nbdb.LogicalRouter { + if policies == nil { + policies = make([]string, 0) + } + return &nbdb.LogicalRouter{ + UUID: "router-UUID", + Name: "router", + Nat: policies, + } +} + +func getLS(ports []string) *nbdb.LogicalSwitch { + return &nbdb.LogicalSwitch{ + UUID: "switch-UUID", + Name: "switch", + Ports: ports, + } +} + +func getLSP(podNamespace, podName string, ips ...net.IP) *nbdb.LogicalSwitchPort { + var ipsStr []string + for _, ip := range ips { + ipsStr = append(ipsStr, ip.String()) + } + return &nbdb.LogicalSwitchPort{ + UUID: fmt.Sprintf("%s-%s-UUID", podNamespace, podName), + Addresses: append([]string{"0a:58:0a:f4:00:04"}, ipsStr...), + ExternalIDs: map[string]string{"namespace": podNamespace, "pod": "true"}, + Name: util.GetLogicalPortName(podNamespace, podName), + } +} + +func getNATsUUIDs(nats []*nbdb.NAT) []string { + natUUIDs := make([]string, 0) + for _, nat := range nats { + natUUIDs = append(natUUIDs, nat.UUID) + } + return natUUIDs +} + +func addLSsToTestData(data []libovsdbtest.TestData, lss ...*nbdb.LogicalSwitch) []libovsdbtest.TestData { + for _, ls := range lss { + data = append(data, ls) + } + return data +} + +func addLSPToTestData(data []libovsdbtest.TestData, lsps ...*nbdb.LogicalSwitchPort) []libovsdbtest.TestData { + for _, lsp := range lsps { + data = append(data, lsp) + } + return data +} + +func addNATToTestData(data []libovsdbtest.TestData, nats ...*nbdb.NAT) []libovsdbtest.TestData { + for _, nat := range nats { + data = append(data, nat) + } + return data +} + +func composeNamespaceName(namespace, name string) string { + return fmt.Sprintf("%s/%s", namespace, name) +} + +func decomposeNamespaceName(str string) (string, string) { + split := strings.Split(str, "/") + return split[0], split[1] +} + +func getSNAT(uuid, logicalIP, extIP string, extIDs map[string]string) *nbdb.NAT { + port := "node-1" + return &nbdb.NAT{ + UUID: uuid, + ExternalIDs: extIDs, + ExternalIP: extIP, + LogicalIP: logicalIP, + LogicalPort: &port, + Type: nbdb.NATTypeSNAT, + } +} diff --git a/go-controller/pkg/ovn/external_ids_syncer/port_group/port_gorup_suite_test.go b/go-controller/pkg/ovn/external_ids_syncer/port_group/port_gorup_suite_test.go index dde4a626c2..be926c3fa2 100644 --- a/go-controller/pkg/ovn/external_ids_syncer/port_group/port_gorup_suite_test.go +++ b/go-controller/pkg/ovn/external_ids_syncer/port_group/port_gorup_suite_test.go @@ -3,7 +3,7 @@ package port_group import ( "testing" - . 
"github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/ovn/external_ids_syncer/port_group/port_group_sync_test.go b/go-controller/pkg/ovn/external_ids_syncer/port_group/port_group_sync_test.go index 0ca461f0d7..294ee8c61b 100644 --- a/go-controller/pkg/ovn/external_ids_syncer/port_group/port_group_sync_test.go +++ b/go-controller/pkg/ovn/external_ids_syncer/port_group/port_group_sync_test.go @@ -2,7 +2,7 @@ package port_group import ( "fmt" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" diff --git a/go-controller/pkg/ovn/gateway.go b/go-controller/pkg/ovn/gateway.go index 82de23f4bb..9e732f1c02 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -7,6 +7,7 @@ import ( "strconv" "strings" + "golang.org/x/exp/maps" kapi "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -154,10 +155,11 @@ func WithLoadBalancerGroups(routerLBGroup, clusterLBGroup, switchLBGroup string) // NOTE2: egressIP SNATs are synced in EIP controller. // TODO (tssurya): Add support cleaning up even if disableSNATMultipleGWs=false, we'd need to remove the perPod // SNATs in case someone switches between these modes. See https://github.com/ovn-org/ovn-kubernetes/issues/3232 -func (gw *GatewayManager) cleanupStalePodSNATs(nodeName string, nodeIPs []*net.IPNet) error { +func (gw *GatewayManager) cleanupStalePodSNATs(nodeName string, nodeIPs []*net.IPNet, gwLRPIPs []net.IP) error { if !config.Gateway.DisableSNATMultipleGWs { return nil } + pods, err := gw.kube.GetPods(metav1.NamespaceAll, metav1.ListOptions{ FieldSelector: fields.OneTermEqualSelector("spec.nodeName", nodeName).String(), }) @@ -165,13 +167,7 @@ func (gw *GatewayManager) cleanupStalePodSNATs(nodeName string, nodeIPs []*net.I return fmt.Errorf("unable to list existing pods on node: %s, %w", nodeName, err) } - gatewayRouter := nbdb.LogicalRouter{ - Name: gw.gwRouterName, - } - routerNats, err := libovsdbops.GetRouterNATs(gw.nbClient, &gatewayRouter) - if err != nil && errors.Is(err, libovsdbclient.ErrNotFound) { - return fmt.Errorf("unable to get NAT entries for router %s on node %s: %w", gatewayRouter.Name, nodeName, err) - } + podIPsOnNode := sets.NewString() // collects all podIPs on node for _, pod := range pods { pod := *pod @@ -203,18 +199,38 @@ func (gw *GatewayManager) cleanupStalePodSNATs(nodeName string, nodeIPs []*net.I podIPsOnNode.Insert(podIP.String()) } } + + gatewayRouter := nbdb.LogicalRouter{ + Name: gw.gwRouterName, + } + routerNats, err := libovsdbops.GetRouterNATs(gw.nbClient, &gatewayRouter) + if err != nil && errors.Is(err, libovsdbclient.ErrNotFound) { + return fmt.Errorf("unable to get NAT entries for router %s on node %s: %w", gatewayRouter.Name, nodeName, err) + } + + nodeIPset := sets.New(util.IPNetsIPToStringSlice(nodeIPs)...) + gwLRPIPset := sets.New(util.StringSlice(gwLRPIPs)...) 
natsToDelete := []*nbdb.NAT{} for _, routerNat := range routerNats { routerNat := routerNat if routerNat.Type != nbdb.NATTypeSNAT { continue } - for _, nodeIP := range nodeIPs { - logicalIP := net.ParseIP(routerNat.LogicalIP) - if routerNat.ExternalIP == nodeIP.IP.String() && !config.ContainsJoinIP(logicalIP) && !podIPsOnNode.Has(routerNat.LogicalIP) { - natsToDelete = append(natsToDelete, routerNat) - } + if !nodeIPset.Has(routerNat.ExternalIP) { + continue + } + if podIPsOnNode.Has(routerNat.LogicalIP) { + continue + } + if gwLRPIPset.Has(routerNat.LogicalIP) { + continue } + logicalIP := net.ParseIP(routerNat.LogicalIP) + if logicalIP == nil { + // this is probably a CIDR so not a pod IP + continue + } + natsToDelete = append(natsToDelete, routerNat) } if len(natsToDelete) > 0 { err := libovsdbops.DeleteNATs(gw.nbClient, &gatewayRouter, natsToDelete...) @@ -233,14 +249,23 @@ func (gw *GatewayManager) GatewayInit( hostSubnets []*net.IPNet, l3GatewayConfig *util.L3GatewayConfig, sctpSupport bool, - gwLRPIfAddrs, drLRPIfAddrs []*net.IPNet, + gwLRPJoinIPs, drLRPIfAddrs []*net.IPNet, externalIPs []net.IP, enableGatewayMTU bool, ) error { gwLRPIPs := make([]net.IP, 0) - for _, gwLRPIfAddr := range gwLRPIfAddrs { - gwLRPIPs = append(gwLRPIPs, gwLRPIfAddr.IP) + for _, gwLRPJoinIP := range gwLRPJoinIPs { + gwLRPIPs = append(gwLRPIPs, gwLRPJoinIP.IP) + } + if gw.netInfo.TopologyType() == types.Layer2Topology { + // At layer2 the GR LRP acts as the layer3 ovn_cluster_router, so we need + // to configure the .1 address here; this will work only for IC with + // one node per zone, since ARPs for .1 will not go beyond the local switch. + // This is being done to add the ICMP SNATs for the .1 podSubnet address that the OVN GR generates + for _, subnet := range hostSubnets { + gwLRPIPs = append(gwLRPIPs, util.GetNodeGatewayIfAddr(subnet).IP) + } } // Create a gateway router. @@ -262,16 +287,26 @@ func (gw *GatewayManager) GatewayInit( if gw.netInfo.GetNetworkName() == types.DefaultNetworkName { logicalRouterOptions["snat-ct-zone"] = "0" } + if gw.netInfo.TopologyType() == types.Layer2Topology { + // When multiple networks are set on the same logical-router-port + // the networks get lexicographically sorted; thus there is no + // telling which IP will be chosen as the router-ip + // when it comes to SNATing traffic after load balancing. + // Hence for Layer2 UDNs let's set the snat-ip explicitly to the + // join subnet IP + joinIPDualStack := make([]string, len(gwLRPJoinIPs)) + for i, gwLRPJoinIP := range gwLRPJoinIPs { + joinIPDualStack[i] = gwLRPJoinIP.IP.String() + } + logicalRouterOptions["lb_force_snat_ip"] = strings.Join(joinIPDualStack, " ") + } logicalRouterExternalIDs := map[string]string{ "physical_ip": physicalIPs[0], "physical_ips": strings.Join(physicalIPs, ","), } if gw.netInfo.IsSecondary() { - networkName := gw.netInfo.GetNetworkName() - topologyType := gw.netInfo.TopologyType() - logicalRouterExternalIDs[types.NetworkExternalID] = networkName - logicalRouterExternalIDs[types.TopologyExternalID] = topologyType + maps.Copy(logicalRouterExternalIDs, util.GenerateExternalIDsForSwitchOrRouter(gw.netInfo)) } logicalRouter := nbdb.LogicalRouter{ @@ -321,6 +356,15 @@ func (gw *GatewayManager) GatewayInit( gwSwitchPort := types.JoinSwitchToGWRouterPrefix + gatewayRouter gwRouterPort := types.GWRouterToJoinSwitchPrefix + gatewayRouter + // In Layer2 networks there is no join switch and the gw.joinSwitchName points to the cluster switch.
+ // Ensure that the ports are named appropriately, this is important for the logical router policies + // created for local node access. + // TODO(kyrtapz): Clean this up for clarity as part of https://github.com/ovn-org/ovn-kubernetes/issues/4689 + if gw.netInfo.TopologyType() == types.Layer2Topology { + gwSwitchPort = types.SwitchToRouterPrefix + gw.joinSwitchName + gwRouterPort = types.RouterToSwitchPrefix + gw.joinSwitchName + } + logicalSwitchPort := nbdb.LogicalSwitchPort{ Name: gwSwitchPort, Type: "router", @@ -334,6 +378,23 @@ types.NetworkExternalID: gw.netInfo.GetNetworkName(), types.TopologyExternalID: gw.netInfo.TopologyType(), } + if gw.netInfo.TopologyType() == types.Layer2Topology { + node, err := gw.watchFactory.GetNode(nodeName) + if err != nil { + return fmt.Errorf("failed to fetch node %s from watch factory: %w", nodeName, err) + } + tunnelID, err := util.ParseUDNLayer2NodeGRLRPTunnelIDs(node, gw.netInfo.GetNetworkName()) + if err != nil { + if util.IsAnnotationNotSetError(err) { + // remote node may not have the annotation yet, suppress it + return types.NewSuppressedError(err) + } + // Don't consider this node, as cluster-manager has not allocated a node id for it yet. + return fmt.Errorf("failed to fetch tunnelID annotation from the node %s for network %s, err: %w", + nodeName, gw.netInfo.GetNetworkName(), err) + } + logicalSwitchPort.Options["requested-tnl-key"] = strconv.Itoa(tunnelID) + } } sw := nbdb.LogicalSwitch{Name: gw.joinSwitchName} err = libovsdbops.CreateOrUpdateLogicalSwitchPortsOnSwitch(gw.nbClient, &sw, &logicalSwitchPort) @@ -343,8 +404,16 @@ gwLRPMAC := util.IPAddrToHWAddr(gwLRPIPs[0]) gwLRPNetworks := []string{} - for _, gwLRPIfAddr := range gwLRPIfAddrs { - gwLRPNetworks = append(gwLRPNetworks, gwLRPIfAddr.String()) + for _, gwLRPJoinIP := range gwLRPJoinIPs { + gwLRPNetworks = append(gwLRPNetworks, gwLRPJoinIP.String()) + } + if gw.netInfo.TopologyType() == types.Layer2Topology { + // At layer2 the GR LRP acts as the layer3 ovn_cluster_router, so we need + // to configure the .1 address here; this will work only for IC with + // one node per zone, since ARPs for .1 will not go beyond the local switch.
+ for _, subnet := range hostSubnets { + gwLRPNetworks = append(gwLRPNetworks, util.GetNodeGatewayIfAddr(subnet).String()) + } } var options map[string]string @@ -364,6 +433,19 @@ func (gw *GatewayManager) GatewayInit( types.NetworkExternalID: gw.netInfo.GetNetworkName(), types.TopologyExternalID: gw.netInfo.TopologyType(), } + _, isNetIPv6 := gw.netInfo.IPMode() + if gw.netInfo.TopologyType() == types.Layer2Topology && isNetIPv6 && config.IPv6Mode { + logicalRouterPort.Ipv6RaConfigs = map[string]string{ + "address_mode": "dhcpv6_stateful", + "send_periodic": "true", + "max_interval": "900", // 15 minutes + "min_interval": "300", // 5 minutes + "router_preference": "LOW", // The static gateway configured by CNI is MEDIUM, so make this LOW so it has less effect on pods + } + if gw.netInfo.MTU() > 0 { + logicalRouterPort.Ipv6RaConfigs["mtu"] = fmt.Sprintf("%d", gw.netInfo.MTU()) + } + } } err = libovsdbops.CreateOrUpdateLogicalRouterPort(gw.nbClient, &logicalRouter, @@ -449,7 +531,7 @@ func (gw *GatewayManager) GatewayInit( return fmt.Errorf("failed to remove stale masquerade resources from northbound database: %w", err) } - if err := gateway.CreateDummyGWMacBindings(gw.nbClient, gatewayRouter); err != nil { + if err := gateway.CreateDummyGWMacBindings(gw.nbClient, gatewayRouter, gw.netInfo); err != nil { return err } @@ -576,7 +658,7 @@ // management port interface for the hostSubnet prefix before adding the routes // towards join switch. mgmtIfAddr := util.GetNodeManagementIfAddr(hostSubnet) - gw.staticRouteCleanup([]net.IP{mgmtIfAddr.IP}) + gw.staticRouteCleanup([]net.IP{mgmtIfAddr.IP}, hostSubnet) + if err := libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate( gw.nbClient, @@ -589,12 +671,15 @@ } } else if config.Gateway.Mode == config.GatewayModeLocal { // If migrating from shared to local gateway, let's remove the static routes towards - // join switch for the hostSubnet prefix + // join switch for the hostSubnet prefix and any potential routes for UDN enabled services.
// Note syncManagementPort happens before gateway sync so only remove things pointing to join subnet if gw.clusterRouterName != "" { p := func(item *nbdb.LogicalRouterStaticRoute) bool { + if _, ok := item.ExternalIDs[types.UDNEnabledServiceExternalID]; ok { + return true + } return item.IPPrefix == lrsr.IPPrefix && item.Policy != nil && *item.Policy == *lrsr.Policy && - config.ContainsJoinIP(net.ParseIP(item.Nexthop)) + gw.containsJoinIP(net.ParseIP(item.Nexthop)) } err := libovsdbops.DeleteLogicalRouterStaticRoutesWithPredicate(gw.nbClient, gw.clusterRouterName, p) if err != nil { @@ -625,6 +710,7 @@ func (gw *GatewayManager) GatewayInit( if nat.Type != nbdb.NATTypeSNAT { continue } + // check external ip changed for _, externalIP := range externalIPs { oldExternalIP, err := util.MatchFirstIPFamily(utilnet.IsIPv6(externalIP), oldExtIPs) @@ -642,10 +728,11 @@ func (gw *GatewayManager) GatewayInit( } } + // note, nat.LogicalIP may be a CIDR or IP, we don't care unless it's an IP parsedLogicalIP := net.ParseIP(nat.LogicalIP) // check if join ip changed - if config.ContainsJoinIP(parsedLogicalIP) { + if gw.containsJoinIP(parsedLogicalIP) { // is a join SNAT, check if IP needs updating joinIP, err := util.MatchFirstIPFamily(utilnet.IsIPv6(parsedLogicalIP), gwLRPIPs) if err != nil { @@ -699,6 +786,7 @@ func (gw *GatewayManager) GatewayInit( nats := make([]*nbdb.NAT, 0, len(clusterIPSubnet)) var nat *nbdb.NAT + if !config.Gateway.DisableSNATMultipleGWs { // Default SNAT rules. DisableSNATMultipleGWs=false in LGW (traffic egresses via mp0) always. // We are not checking for gateway mode to be shared explicitly to reduce topology differences. @@ -708,7 +796,8 @@ func (gw *GatewayManager) GatewayInit( return fmt.Errorf("failed to create default SNAT rules for gateway router %s: %v", gatewayRouter, err) } - nat = libovsdbops.BuildSNAT(&externalIP[0], entry, "", extIDs) + + nat = libovsdbops.BuildSNATWithMatch(&externalIP[0], entry, "", extIDs, gw.netInfo.GetNetworkScopedClusterSubnetSNATMatch(nodeName)) nats = append(nats, nat) } err := libovsdbops.CreateOrUpdateNATs(gw.nbClient, &logicalRouter, nats...) @@ -718,7 +807,7 @@ func (gw *GatewayManager) GatewayInit( } else { // ensure we do not have any leftover SNAT entries after an upgrade for _, logicalSubnet := range clusterIPSubnet { - nat = libovsdbops.BuildSNAT(nil, logicalSubnet, "", extIDs) + nat = libovsdbops.BuildSNATWithMatch(nil, logicalSubnet, "", extIDs, gw.netInfo.GetNetworkScopedClusterSubnetSNATMatch(nodeName)) nats = append(nats, nat) } err := libovsdbops.DeleteNATs(gw.nbClient, &logicalRouter, nats...) 
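The reworked cleanupStalePodSNATs hunk above reduces the per-NAT decision to a few set-membership checks on SNAT entries. Here is a minimal, standalone sketch of that decision; isStalePodSNAT and the plain string-set maps are illustrative stand-ins for this PR's code, which operates on nbdb.NAT records and k8s set types:

package main

import (
	"fmt"
	"net"
)

// isStalePodSNAT reports whether an SNAT entry should be deleted: its external
// IP must belong to this node, its logical IP must be neither a live pod IP on
// the node nor one of the gateway LRP IPs, and the logical IP must parse as a
// plain IP (CIDR logical IPs are cluster-subnet SNATs, not per-pod SNATs).
func isStalePodSNAT(externalIP, logicalIP string, nodeIPs, podIPs, gwLRPIPs map[string]bool) bool {
	if !nodeIPs[externalIP] {
		return false // SNAT for some other external IP; leave it alone
	}
	if podIPs[logicalIP] || gwLRPIPs[logicalIP] {
		return false // still owned by a running pod or by the GR's own LRP IP
	}
	if net.ParseIP(logicalIP) == nil {
		return false // probably a CIDR, i.e. a cluster-subnet SNAT, not a pod IP
	}
	return true
}

func main() {
	nodeIPs := map[string]bool{"172.18.0.2": true}
	podIPs := map[string]bool{"10.244.0.5": true}
	gwLRPIPs := map[string]bool{"100.64.0.2": true}
	fmt.Println(isStalePodSNAT("172.18.0.2", "10.244.0.9", nodeIPs, podIPs, gwLRPIPs))    // true: pod is gone
	fmt.Println(isStalePodSNAT("172.18.0.2", "100.64.0.2", nodeIPs, podIPs, gwLRPIPs))    // false: GR LRP IP
	fmt.Println(isStalePodSNAT("172.18.0.2", "10.244.0.0/16", nodeIPs, podIPs, gwLRPIPs)) // false: CIDR
}

Note that, as in the hunk above, only entries of type SNAT reach this check; DNAT entries are skipped before the filter runs.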
@@ -727,7 +816,7 @@ func (gw *GatewayManager) GatewayInit( } } - if err := gw.cleanupStalePodSNATs(nodeName, l3GatewayConfig.IPAddresses); err != nil { + if err := gw.cleanupStalePodSNATs(nodeName, l3GatewayConfig.IPAddresses, gwLRPIPs); err != nil { return fmt.Errorf("failed to sync stale SNATs on node %s: %v", nodeName, err) } @@ -832,10 +921,7 @@ func (gw *GatewayManager) addExternalSwitch(prefix, interfaceID, nodeName, gatew } sw := nbdb.LogicalSwitch{Name: externalSwitch} if gw.netInfo.IsSecondary() { - sw.ExternalIDs = map[string]string{ - types.NetworkExternalID: gw.netInfo.GetNetworkName(), - types.TopologyExternalID: gw.netInfo.TopologyType(), - } + sw.ExternalIDs = util.GenerateExternalIDsForSwitchOrRouter(gw.netInfo) } err = libovsdbops.CreateOrUpdateLogicalSwitchPortsAndSwitch(gw.nbClient, &sw, &externalLogicalSwitchPort, &externalLogicalSwitchPortToRouter) @@ -992,6 +1078,17 @@ func (gw *GatewayManager) Cleanup() error { var nextHops []net.IP gwRouterToJoinSwitchPortName := types.GWRouterToJoinSwitchPrefix + gw.gwRouterName + portName := types.JoinSwitchToGWRouterPrefix + gw.gwRouterName + + // In Layer2 networks there is no join switch and the gw.joinSwitchName points to the cluster switch. + // Ensure that the ports are named appropriately, this is important for the logical router policies + // created for local node access. + // TODO(kyrtapz): Clean this up for clarity as part of https://github.com/ovn-org/ovn-kubernetes/issues/4689 + if gw.netInfo.TopologyType() == types.Layer2Topology { + gwRouterToJoinSwitchPortName = types.RouterToSwitchPrefix + gw.joinSwitchName + portName = types.SwitchToRouterPrefix + gw.joinSwitchName + } + gwIPAddrs, err := libovsdbutil.GetLRPAddrs(gw.nbClient, gwRouterToJoinSwitchPortName) if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { return fmt.Errorf( @@ -1005,11 +1102,10 @@ func (gw *GatewayManager) Cleanup() error { for _, gwIPAddr := range gwIPAddrs { nextHops = append(nextHops, gwIPAddr.IP) } - gw.staticRouteCleanup(nextHops) + gw.staticRouteCleanup(nextHops, nil) gw.policyRouteCleanup(nextHops) // Remove the patch port that connects join switch to gateway router - portName := types.JoinSwitchToGWRouterPrefix + gw.gwRouterName lsp := nbdb.LogicalSwitchPort{Name: portName} sw := nbdb.LogicalSwitch{Name: gw.joinSwitchName} err = libovsdbops.DeleteLogicalSwitchPorts(gw.nbClient, &sw, &lsp) @@ -1028,7 +1124,7 @@ func (gw *GatewayManager) Cleanup() error { } // Remove the static mac bindings of the gateway router - err = gateway.DeleteDummyGWMacBindings(gw.nbClient, gw.gwRouterName) + err = gateway.DeleteDummyGWMacBindings(gw.nbClient, gw.gwRouterName, gw.netInfo) if err != nil { return fmt.Errorf("failed to delete GR dummy mac bindings for node %s: %w", gw.nodeName, err) } @@ -1074,7 +1170,7 @@ func (gw *GatewayManager) delPbrAndNatRules(nodeName string) { gw.removeLRPolicies(nodeName) } -func (gw *GatewayManager) staticRouteCleanup(nextHops []net.IP) { +func (gw *GatewayManager) staticRouteCleanup(nextHops []net.IP, ipPrefix *net.IPNet) { if len(nextHops) == 0 { return // if we do not have next hops, we do not have any routes to cleanup } @@ -1090,6 +1186,9 @@ func (gw *GatewayManager) staticRouteCleanup(nextHops []net.IP) { if networkName != gw.netInfo.GetNetworkName() { return false } + if ipPrefix != nil && item.IPPrefix != ipPrefix.String() { + return false + } return ips.Has(item.Nexthop) } err := libovsdbops.DeleteLogicalRouterStaticRoutesWithPredicate(gw.nbClient, gw.clusterRouterName, p) @@ -1157,13 +1256,21 @@ func (gw 
*GatewayManager) removeLRPolicies(nodeName string) { } } +func (gw *GatewayManager) containsJoinIP(ip net.IP) bool { + ipNet := &net.IPNet{ + IP: ip, + Mask: util.GetIPFullMask(ip), + } + return util.IsContainedInAnyCIDR(ipNet, gw.netInfo.JoinSubnets()...) +} + func (gw *GatewayManager) syncGatewayLogicalNetwork( node *kapi.Node, l3GatewayConfig *util.L3GatewayConfig, hostSubnets []*net.IPNet, hostAddrs []string, clusterSubnets []*net.IPNet, - gwLRPIPs []*net.IPNet, + grLRPJoinIPs []*net.IPNet, isSCTPSupported bool, ovnClusterLRPToJoinIfAddrs []*net.IPNet, externalIPs []net.IP, @@ -1176,7 +1283,7 @@ func (gw *GatewayManager) syncGatewayLogicalNetwork( hostSubnets, l3GatewayConfig, isSCTPSupported, - gwLRPIPs, + grLRPJoinIPs, // the joinIP allocated to this node's GR for this controller's network ovnClusterLRPToJoinIfAddrs, externalIPs, enableGatewayMTU, @@ -1190,22 +1297,28 @@ func (gw *GatewayManager) syncGatewayLogicalNetwork( routerName = gw.gwRouterName } for _, subnet := range hostSubnets { - hostIfAddr := util.GetNodeManagementIfAddr(subnet) - if hostIfAddr == nil { - return fmt.Errorf("host interface address not found for subnet %q on network %q", subnet, gw.netInfo.GetNetworkName()) + mgmtIfAddr := util.GetNodeManagementIfAddr(subnet) + if mgmtIfAddr == nil { + return fmt.Errorf("management interface address not found for subnet %q on network %q", subnet, gw.netInfo.GetNetworkName()) } - l3GatewayConfigIP, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6(hostIfAddr.IP), l3GatewayConfig.IPAddresses) + l3GatewayConfigIP, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6(mgmtIfAddr.IP), l3GatewayConfig.IPAddresses) if err != nil { return fmt.Errorf("failed to extract the gateway IP addr for network %q: %v", gw.netInfo.GetNetworkName(), err) } - relevantHostIPs, err := util.MatchAllIPStringFamily(utilnet.IsIPv6(hostIfAddr.IP), hostAddrs) + relevantHostIPs, err := util.MatchAllIPStringFamily(utilnet.IsIPv6(mgmtIfAddr.IP), hostAddrs) if err != nil && err != util.ErrorNoIP { return fmt.Errorf("failed to extract the host IP addrs for network %q: %v", gw.netInfo.GetNetworkName(), err) } pbrMngr := gatewayrouter.NewPolicyBasedRoutesManager(gw.nbClient, routerName, gw.netInfo) - if err := pbrMngr.Add(node.Name, hostIfAddr.IP.String(), l3GatewayConfigIP, relevantHostIPs); err != nil { + if err := pbrMngr.AddSameNodeIPPolicy(node.Name, mgmtIfAddr.IP.String(), l3GatewayConfigIP, relevantHostIPs); err != nil { return fmt.Errorf("failed to configure the policy based routes for network %q: %v", gw.netInfo.GetNetworkName(), err) } + if gw.netInfo.TopologyType() == types.Layer2Topology && config.Gateway.Mode == config.GatewayModeLocal { + if err := pbrMngr.AddHostCIDRPolicy(node, mgmtIfAddr.IP.String(), subnet.String()); err != nil { + return fmt.Errorf("failed to configure the hostCIDR policy for L2 network %q on local gateway: %v", + gw.netInfo.GetNetworkName(), err) + } + } } return nil @@ -1217,7 +1330,7 @@ func (gw *GatewayManager) syncNodeGateway( l3GatewayConfig *util.L3GatewayConfig, hostSubnets []*net.IPNet, hostAddrs []string, - clusterSubnets, gwLRPIPs []*net.IPNet, + clusterSubnets, grLRPJoinIPs []*net.IPNet, isSCTPSupported bool, joinSwitchIPs []*net.IPNet, externalIPs []net.IP, @@ -1233,9 +1346,9 @@ func (gw *GatewayManager) syncNodeGateway( hostSubnets, hostAddrs, clusterSubnets, - gwLRPIPs, + grLRPJoinIPs, // the joinIP allocated to this node for this controller's network isSCTPSupported, - joinSwitchIPs, + joinSwitchIPs, // the .1 of this controller's global joinSubnet externalIPs, ); 
err != nil { return fmt.Errorf("error creating gateway for node %s: %v", node.Name, err) diff --git a/go-controller/pkg/ovn/gateway/gateway.go b/go-controller/pkg/ovn/gateway/gateway.go index 246161eebb..333a75b07c 100644 --- a/go-controller/pkg/ovn/gateway/gateway.go +++ b/go-controller/pkg/ovn/gateway/gateway.go @@ -63,15 +63,23 @@ func GetGatewayPhysicalIPs(nbClient libovsdbclient.Client, gatewayRouter string) // CreateDummyGWMacBindings creates mac bindings (ipv4 and ipv6) for a fake next hops // used by host->service traffic -func CreateDummyGWMacBindings(nbClient libovsdbclient.Client, gwRouterName string) error { +func CreateDummyGWMacBindings(nbClient libovsdbclient.Client, gwRouterName string, netInfo util.NetInfo) error { logicalPort := ovntypes.GWRouterToExtSwitchPrefix + gwRouterName - dummyNextHopIPs := node.DummyNextHopIPs() - smbs := make([]*nbdb.StaticMACBinding, len(dummyNextHopIPs)) - for i := range dummyNextHopIPs { + ips := node.DummyNextHopIPs() + // In UDN, add static MAC bindings for host masquerade IPs. + // This is necessary because the masquerade network is directly + // attached to the external port of the gateway router, + // and neighbor discovery has to be avoided since these IPs + // are the same across all nodes. + if netInfo.IsPrimaryNetwork() { + ips = append(ips, node.DummyMasqueradeIPs()...) + } + smbs := make([]*nbdb.StaticMACBinding, len(ips)) + for i := range ips { smb := &nbdb.StaticMACBinding{ LogicalPort: logicalPort, - MAC: util.IPAddrToHWAddr(dummyNextHopIPs[i]).String(), - IP: dummyNextHopIPs[i].String(), + MAC: util.IPAddrToHWAddr(ips[i]).String(), + IP: ips[i].String(), OverrideDynamicMAC: true, } smbs[i] = smb @@ -89,14 +97,17 @@ func CreateDummyGWMacBindings(nbClient libovsdbclient.Client, gwRouterName strin // DeleteDummyGWMacBindings removes mac bindings (ipv4 and ipv6) for a fake next hops // used by host->service traffic -func DeleteDummyGWMacBindings(nbClient libovsdbclient.Client, gwRouterName string) error { +func DeleteDummyGWMacBindings(nbClient libovsdbclient.Client, gwRouterName string, netInfo util.NetInfo) error { logicalPort := ovntypes.GWRouterToExtSwitchPrefix + gwRouterName - dummyNextHopIPs := node.DummyNextHopIPs() - smbs := make([]*nbdb.StaticMACBinding, len(dummyNextHopIPs)) - for i := range dummyNextHopIPs { + ips := node.DummyNextHopIPs() + if netInfo.IsPrimaryNetwork() { + ips = append(ips, node.DummyMasqueradeIPs()...) 
+ } + smbs := make([]*nbdb.StaticMACBinding, len(ips)) + for i := range ips { smb := &nbdb.StaticMACBinding{ LogicalPort: logicalPort, - IP: dummyNextHopIPs[i].String(), + IP: ips[i].String(), } smbs[i] = smb } diff --git a/go-controller/pkg/ovn/gateway_test.go b/go-controller/pkg/ovn/gateway_test.go index 01fdfd0d5b..ebc1453b1a 100644 --- a/go-controller/pkg/ovn/gateway_test.go +++ b/go-controller/pkg/ovn/gateway_test.go @@ -6,7 +6,7 @@ import ( "strconv" "strings" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/onsi/gomega/format" v1 "k8s.io/api/core/v1" @@ -365,8 +365,9 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { ginkgo.It("creates an IPv4 gateway in OVN", func() { routeUUID := "route-UUID" leftoverMgmtIPRoute := &nbdb.LogicalRouterStaticRoute{ - Nexthop: "10.130.0.2", - UUID: routeUUID, + Nexthop: "10.130.0.2", + IPPrefix: "10.130.0.0/23", + UUID: routeUUID, } expectedOVNClusterRouter := &nbdb.LogicalRouter{ UUID: types.OVNClusterRouter + "-UUID", @@ -675,8 +676,9 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { ginkgo.It("creates an IPv4 gateway in OVN without next hops", func() { routeUUID := "route-UUID" leftoverMgmtIPRoute := &nbdb.LogicalRouterStaticRoute{ - Nexthop: "10.130.0.2", - UUID: routeUUID, + Nexthop: "10.130.0.2", + IPPrefix: "10.130.0.0/23", + UUID: routeUUID, } expectedOVNClusterRouter := &nbdb.LogicalRouter{ UUID: types.OVNClusterRouter + "-UUID",
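A side note on the static MAC bindings added in gateway.go above: they work because util.IPAddrToHWAddr derives the same MAC from a given IP on every node, so the shared dummy next-hop and masquerade addresses never need neighbor discovery. A rough standalone sketch of the IPv4 case (the 0a:58 prefix follows the convention ovn-kubernetes uses for derived MACs; the real helper also covers IPv6 via hashing, omitted here):

```go
package main

import (
	"fmt"
	"net"
)

// macForIPv4 derives a stable MAC from an IPv4 address: a fixed 0a:58 prefix
// followed by the four address octets. Every node computes the identical MAC
// for the same IP, which is why a static MAC binding can be installed up front
// instead of relying on ARP/ND for addresses that exist on all nodes at once.
func macForIPv4(ip net.IP) (net.HardwareAddr, error) {
	v4 := ip.To4()
	if v4 == nil {
		return nil, fmt.Errorf("not an IPv4 address: %s", ip)
	}
	return net.HardwareAddr{0x0a, 0x58, v4[0], v4[1], v4[2], v4[3]}, nil
}

func main() {
	mac, err := macForIPv4(net.ParseIP("169.254.169.4")) // illustrative masquerade-style IP
	fmt.Println(mac, err)                                // 0a:58:a9:fe:a9:04 <nil>
}
```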
diff --git a/go-controller/pkg/ovn/gatewayrouter/policybasedroutes.go b/go-controller/pkg/ovn/gatewayrouter/policybasedroutes.go index e054a67c18..b98c5e31fb 100644 --- a/go-controller/pkg/ovn/gatewayrouter/policybasedroutes.go +++ b/go-controller/pkg/ovn/gatewayrouter/policybasedroutes.go @@ -6,6 +6,7 @@ import ( "strconv" "strings" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" utilnet "k8s.io/utils/net" @@ -31,7 +32,7 @@ func NewPolicyBasedRoutesManager(nbClient client.Client, clusterRouterName strin } } -func (pbr *PolicyBasedRoutesManager) Add(nodeName, mgmtPortIP string, hostIfCIDR *net.IPNet, otherHostAddrs []string) error { +func (pbr *PolicyBasedRoutesManager) AddSameNodeIPPolicy(nodeName, mgmtPortIP string, hostIfCIDR *net.IPNet, otherHostAddrs []string) error { if hostIfCIDR == nil { return fmt.Errorf("nil host interface CIDR") } @@ -44,13 +45,13 @@ func (pbr *PolicyBasedRoutesManager) Add(nodeName, mgmtPortIP string, hostIfCIDR if !isHostIPsValid(otherHostAddrs) { return fmt.Errorf("invalid other host address(es): %v", otherHostAddrs) } - l3Prefix := getIPPrefix(hostIfCIDR.IP) + l3Prefix := getIPCIDRPrefix(hostIfCIDR) matches := sets.New[string]() for _, hostIP := range append(otherHostAddrs, hostIfCIDR.IP.String()) { // embed nodeName as comment so that it is easier to delete these rules later on. // logical router policy doesn't support external_ids to stash metadata networkScopedSwitchName := pbr.netInfo.GetNetworkScopedSwitchName(nodeName) - matchStr := generateMatch(networkScopedSwitchName, l3Prefix, hostIP) + matchStr := generateNodeIPMatch(networkScopedSwitchName, l3Prefix, hostIP) matches = matches.Insert(matchStr) } @@ -66,6 +67,46 @@ func (pbr *PolicyBasedRoutesManager) Add(nodeName, mgmtPortIP string, hostIfCIDR return nil } +// AddHostCIDRPolicy adds the following policy to the GR in local-gateway-mode for the UDN L2 topology: +// 99 ip4.dst == 172.18.0.0/16 && ip4.src == 10.100.200.0/24 reroute 10.100.200.2 +// In LGW the rtoe port of the GR is directly attached to the hostCIDR range, so even with the +// reroute-to-mp0 src-ip route that syncNodeManagementPort adds to the GR: +// 10.100.200.0/24 10.100.200.2 src-ip +// the dst-ip based OVN route for that directly attached network takes precedence and sends traffic +// destined for other nodes out via rtoe and br-ex, which is not desired in LGW. +// Hence we need an LRP that reroutes all traffic destined to the primary nodeCIDR range to mp0, +// overriding the directly attached network route. +func (pbr *PolicyBasedRoutesManager) AddHostCIDRPolicy(node *v1.Node, mgmtPortIP, clusterPodSubnet string) error { + if mgmtPortIP == "" || net.ParseIP(mgmtPortIP) == nil { + return fmt.Errorf("invalid management port IP address: %q", mgmtPortIP) + } + // we only care about the primary node family since GR's port has that IP + // we don't care about secondary nodeIPs here which is why we are not using + // the hostCIDR annotation + primaryIfAddrs, err := util.GetNodeIfAddrAnnotation(node) + if err != nil { + return fmt.Errorf("failed to get primaryIP for node %s, err: %v", node.Name, err) + } + nodePrimaryStringPrefix := primaryIfAddrs.IPv4 + if utilnet.IsIPv6String(mgmtPortIP) { + nodePrimaryStringPrefix = primaryIfAddrs.IPv6 + } + _, nodePrimaryCIDRPrefix, err := net.ParseCIDR(nodePrimaryStringPrefix) + if nodePrimaryStringPrefix == "" || err != nil || nodePrimaryCIDRPrefix == nil { + return fmt.Errorf("invalid host CIDR prefix: prefixString: %q, prefixCIDR: %q, error: %v", + nodePrimaryStringPrefix, nodePrimaryCIDRPrefix, err) + } + ovnPrefix := getIPCIDRPrefix(nodePrimaryCIDRPrefix) + matchStr := generateHostCIDRMatch(ovnPrefix, nodePrimaryCIDRPrefix.String(), clusterPodSubnet) + if err := pbr.createPolicyBasedRoutes(matchStr, ovntypes.UDNHostCIDRPolicyPriority, mgmtPortIP); err != nil { + return fmt.Errorf("failed to add host-cidr policy route '%s' on host %q on %s "+ + "error: %v", matchStr, node.Name, pbr.clusterRouterName, err) + } + + return nil +} + // This function syncs logical router policies given various criteria // This function compares the following ovn-nbctl output: @@ -218,12 +259,16 @@ func (pbr *PolicyBasedRoutesManager) createPolicyBasedRoutes(match, priority, ne return nil } -func generateMatch(switchName, ipPrefix, hostIP string) string { +func generateNodeIPMatch(switchName, ipPrefix, hostIP string) string { return fmt.Sprintf(`inport == "%s%s" && %s.dst == %s /* %s */`, ovntypes.RouterToSwitchPrefix, switchName, ipPrefix, hostIP, switchName) } -func getIPPrefix(ip net.IP) string { - if utilnet.IsIPv6(ip) { +func generateHostCIDRMatch(ipPrefix, nodePrimaryCIDRPrefix, clusterPodSubnetPrefix string) string { + return fmt.Sprintf(`%s.dst == %s && %s.src == %s`, ipPrefix, nodePrimaryCIDRPrefix, ipPrefix,
clusterPodSubnetPrefix) +} + +func getIPCIDRPrefix(cidr *net.IPNet) string { + if utilnet.IsIPv6CIDR(cidr) { return "ip6" } return "ip4" diff --git a/go-controller/pkg/ovn/gatewayrouter/policybasedroutes_test.go b/go-controller/pkg/ovn/gatewayrouter/policybasedroutes_test.go index b151465eed..d90152717b 100644 --- a/go-controller/pkg/ovn/gatewayrouter/policybasedroutes_test.go +++ b/go-controller/pkg/ovn/gatewayrouter/policybasedroutes_test.go @@ -12,6 +12,8 @@ import ( libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilnet "k8s.io/utils/net" ) @@ -37,7 +39,7 @@ func (n network) copyNetworkAndSetLRPs(lrps ...*nbdb.LogicalRouterPolicy) networ return nCopy } -func (n network) generateTestData() []libovsdbtest.TestData { +func (n network) generateTestData(nodeName string) []libovsdbtest.TestData { data := make([]libovsdbtest.TestData, 0, 0) lrpUUIDs := make([]string, 0, len(n.initialLRPs)) for _, lrp := range n.initialLRPs { @@ -57,15 +59,19 @@ func (n network) generateTestData() []libovsdbtest.TestData { Name: n.info.GetNetworkScopedClusterRouterName(), Policies: lrpUUIDs, } + if n.info.TopologyType() == types.Layer2Topology { + lr.Name = n.info.GetNetworkScopedGWRouterName(nodeName) + lr.UUID = getLRUUID(n.info.GetNetworkScopedGWRouterName(nodeName)) + } return append(data, lr) } type networks []network -func (ns networks) generateTestData() []libovsdbtest.TestData { +func (ns networks) generateTestData(nodeName string) []libovsdbtest.TestData { data := make([]libovsdbtest.TestData, 0) for _, n := range ns { - data = append(data, n.generateTestData()...) + data = append(data, n.generateTestData(nodeName)...) 
} return data } @@ -91,7 +97,7 @@ type test struct { expectErr bool } -func TestAdd(t *testing.T) { +func TestAddSameNodeIPPolicy(t *testing.T) { const ( node1Name = "node1" node1HostIPv4Str = "192.168.1.10" @@ -159,7 +165,7 @@ func TestAdd(t *testing.T) { &nbdb.LogicalRouterPolicy{ UUID: "node-ip-lrp-uuid", Priority: nodeSubNetPrio, - Match: generateMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), + Match: generateNodeIPMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{node1CDNMgntIPv4Str}, }, @@ -186,14 +192,14 @@ func TestAdd(t *testing.T) { &nbdb.LogicalRouterPolicy{ UUID: "node-ip-lrp-uuid", Priority: nodeSubNetPrio, - Match: generateMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), + Match: generateNodeIPMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{node1CDNMgntIPv4Str}, }, &nbdb.LogicalRouterPolicy{ UUID: "node-ip2-lrp-uuid", Priority: nodeSubNetPrio, - Match: generateMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostOtherAddrIPv4Str), + Match: generateNodeIPMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostOtherAddrIPv4Str), Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{node1CDNMgntIPv4Str}, }, @@ -234,7 +240,7 @@ func TestAdd(t *testing.T) { &nbdb.LogicalRouterPolicy{ UUID: "node-ip-lrp-uuid", Priority: nodeSubNetPrio, - Match: generateMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v6Prefix, node1HostIPv6Str), + Match: generateNodeIPMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v6Prefix, node1HostIPv6Str), Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{node1CDNMgntIPv6Str}, }, @@ -267,14 +273,14 @@ func TestAdd(t *testing.T) { &nbdb.LogicalRouterPolicy{ UUID: "node-ip-lrp-v4-uuid", Priority: nodeSubNetPrio, - Match: generateMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), + Match: generateNodeIPMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{node1CDNMgntIPv4Str}, }, &nbdb.LogicalRouterPolicy{ UUID: "node-ip-lrp-v6-uuid", Priority: nodeSubNetPrio, - Match: generateMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v6Prefix, node1HostIPv6Str), + Match: generateNodeIPMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v6Prefix, node1HostIPv6Str), Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{node1CDNMgntIPv6Str}, }, @@ -312,14 +318,14 @@ func TestAdd(t *testing.T) { &nbdb.LogicalRouterPolicy{ UUID: "node-ip-lrp-v4-uuid", Priority: nodeSubNetPrio, - Match: generateMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), + Match: generateNodeIPMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{node1CDNMgntIPv4Str}, }, &nbdb.LogicalRouterPolicy{ UUID: "node-ip-lrp-v6-uuid", Priority: nodeSubNetPrio, - Match: generateMatch(udnL3Network.info.GetNetworkScopedSwitchName(node1Name), v6Prefix, node1HostIPv6Str), + Match: generateNodeIPMatch(udnL3Network.info.GetNetworkScopedSwitchName(node1Name), v6Prefix, node1HostIPv6Str), 
Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{node1UDNMgntIPv6Str}, ExternalIDs: map[string]string{ @@ -344,7 +350,7 @@ func TestAdd(t *testing.T) { &nbdb.LogicalRouterPolicy{ UUID: "node-ip-lrp-uuid", Priority: nodeSubNetPrio, - Match: generateMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), + Match: generateNodeIPMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{node1CDNMgntIPv4Str}, })}, @@ -357,7 +363,7 @@ func TestAdd(t *testing.T) { &nbdb.LogicalRouterPolicy{ UUID: "node-ip-lrp-uuid", Priority: nodeSubNetPrio, - Match: generateMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), + Match: generateNodeIPMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{node1CDNMgntIPv4Str}, }, @@ -378,7 +384,7 @@ func TestAdd(t *testing.T) { &nbdb.LogicalRouterPolicy{ UUID: "node-ip-lrp-uuid", Priority: nodeSubNetPrio, - Match: generateMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), + Match: generateNodeIPMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{node1CDNMgntIPv4Str}, })}, @@ -391,7 +397,7 @@ func TestAdd(t *testing.T) { &nbdb.LogicalRouterPolicy{ UUID: "node-ip-lrp-uuid", Priority: nodeSubNetPrio, - Match: generateMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), + Match: generateNodeIPMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{node1CDNMgntIPv4Str}, }, @@ -419,7 +425,7 @@ func TestAdd(t *testing.T) { &nbdb.LogicalRouterPolicy{ UUID: "node-ip-lrp-uuid", Priority: nodeSubNetPrio, - Match: generateMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), + Match: generateNodeIPMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{node1CDNMgntIPv4Str}, }), @@ -438,14 +444,14 @@ func TestAdd(t *testing.T) { &nbdb.LogicalRouterPolicy{ UUID: "node-ip-lrp-uuid", Priority: nodeSubNetPrio, - Match: generateMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), + Match: generateNodeIPMatch(cdnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{node1CDNMgntIPv4Str}, }, &nbdb.LogicalRouterPolicy{ UUID: "node-ip-lrp2-uuid", Priority: nodeSubNetPrio, - Match: generateMatch(udnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), + Match: generateNodeIPMatch(udnL3Network.info.GetNetworkScopedSwitchName(node1Name), v4Prefix, node1HostIPv4Str), Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{node1UDNMgntIPv4Str}, ExternalIDs: map[string]string{ @@ -460,7 +466,7 @@ func TestAdd(t *testing.T) { for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { dbSetup := libovsdbtest.TestSetup{ - NBData: tt.initialDB.generateTestData(), + NBData: tt.initialDB.generateTestData(node1Name), } nbdbClient, cleanup, err := libovsdbtest.NewNBTestHarness(dbSetup, nil) if err != nil { @@ -484,7 
+490,7 @@ func TestAdd(t *testing.T) { if utilnet.IsIPv6(p.hostInfCIDR.IP) { mgntIP = targetNet.mgntIPv6 } - err = mgr.Add(p.nodeName, mgntIP, p.hostInfCIDR, p.otherHostInfAddrs) + err = mgr.AddSameNodeIPPolicy(p.nodeName, mgntIP, p.hostInfCIDR, p.otherHostInfAddrs) if tt.expectErr && err == nil { t.Fatalf("test: \"%s\", expected error but none occurred", tt.desc) } @@ -503,3 +509,141 @@ func TestAdd(t *testing.T) { }) } } + +func TestAddHostCIDRPolicy(t *testing.T) { + const ( + node1Name = "node1" + hostCIDRV4RangeStr = "192.168.1.0/24" + hostCIDRV6RangeStr = "fc00:f853:ccd:e793::/64" + node1HostIPv4Str = "192.168.1.10" + node1HostCIDR24IPv4Str = node1HostIPv4Str + "/24" + node1HostIPv6Str = "fc00:f853:ccd:e793::3" + node1HostCIDR64IPv6Str = node1HostIPv6Str + "/64" + joinSubnetIPv4Str = "100.10.1.0/24" + clusterSubnetIPv4Str = "10.128.0.0/16" + clusterSubnetIPv6Str = "2002:0:0:1234::/64" + node1UDNMgntIPv4Str = "10.200.1.2" + node1UDNMgntIPv6Str = "fd00:20:244::2" + v4Prefix = "ip4" + v6Prefix = "ip6" + udnNetworkName = "network1" + ) + + var ( + hostCIDRPolicyPrio, _ = strconv.Atoi(types.UDNHostCIDRPolicyPriority) + _, hostCIDRV4Range, _ = net.ParseCIDR(hostCIDRV4RangeStr) + _, hostCIDRV6Range, _ = net.ParseCIDR(hostCIDRV6RangeStr) + l2NetInfo, _ = util.NewNetInfo(&types2.NetConf{ + NetConf: cnitypes.NetConf{Name: udnNetworkName}, + Topology: types.Layer2Topology, + JoinSubnet: joinSubnetIPv4Str, // not required, but adding so NewNetInfo doesn't fail + Subnets: clusterSubnetIPv4Str + "," + clusterSubnetIPv6Str, // not required, but adding so NewNetInfo doesn't fail + }) + udnL2Network = network{ + initialLRPs: nil, + info: l2NetInfo, + mgntIPv4: node1UDNMgntIPv4Str, + mgntIPv6: node1UDNMgntIPv6Str, + } + node = &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: node1Name, + Annotations: map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", + node1HostCIDR24IPv4Str, node1HostCIDR64IPv6Str), + }, + }, + } + ) + + tests := []test{ + { + desc: "[udn][l2][ipv4][ipv6] add hostCIDR policy for L2", + addPolicies: []policy{ + { + targetNetwork: udnL2Network.info.GetNetworkName(), + hostInfCIDR: hostCIDRV4Range, + }, + { + targetNetwork: udnL2Network.info.GetNetworkName(), + hostInfCIDR: hostCIDRV6Range, + }, + }, + initialDB: networks{udnL2Network}, + expectedDB: []libovsdbtest.TestData{ + &nbdb.LogicalRouter{ + UUID: "udn-gr-uuid", + Name: udnL2Network.info.GetNetworkScopedGWRouterName(node1Name), + Policies: []string{"node-ip-lrp-v4-uuid", "node-ip-lrp-v6-uuid"}, + }, + &nbdb.LogicalRouterPolicy{ + UUID: "node-ip-lrp-v4-uuid", + Priority: hostCIDRPolicyPrio, + Match: generateHostCIDRMatch(v4Prefix, hostCIDRV4RangeStr, clusterSubnetIPv4Str), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: []string{node1UDNMgntIPv4Str}, + ExternalIDs: map[string]string{ + types.NetworkExternalID: udnL2Network.info.GetNetworkName(), + types.TopologyExternalID: udnL2Network.info.TopologyType(), + }, + }, + &nbdb.LogicalRouterPolicy{ + UUID: "node-ip-lrp-v6-uuid", + Priority: hostCIDRPolicyPrio, + Match: generateHostCIDRMatch(v6Prefix, hostCIDRV6RangeStr, clusterSubnetIPv6Str), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: []string{node1UDNMgntIPv6Str}, + ExternalIDs: map[string]string{ + types.NetworkExternalID: udnL2Network.info.GetNetworkName(), + types.TopologyExternalID: udnL2Network.info.TopologyType(), + }, + }, + }, + }, + }
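+ // With the constants above, generateHostCIDRMatch produces matches of the form
+ //   ip4.dst == 192.168.1.0/24 && ip4.src == 10.128.0.0/16
+ //   ip6.dst == fc00:f853:ccd:e793::/64 && ip6.src == 2002:0:0:1234::/64
+ // i.e. pod-subnet-sourced traffic destined to the node's primary CIDR range.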
+ + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + dbSetup := libovsdbtest.TestSetup{ + NBData: tt.initialDB.generateTestData(node1Name), + } + nbdbClient, cleanup, err := libovsdbtest.NewNBTestHarness(dbSetup, nil) + if err != nil { + t.Errorf("libovsdb client error: %v", err) + return + } + t.Cleanup(cleanup.Cleanup) + netToMgr := map[string]*PolicyBasedRoutesManager{} + for _, net := range tt.initialDB { + netToMgr[net.info.GetNetworkName()] = NewPolicyBasedRoutesManager(nbdbClient, net.info.GetNetworkScopedGWRouterName(node1Name), net.info) + } + // verify all policies have a valid network name + for _, p := range tt.addPolicies { + mgr, ok := netToMgr[p.targetNetwork] + if !ok { + t.Errorf("policy defined a network %q but no associated network defined with this name", p.targetNetwork) + return + } + targetNet := tt.initialDB.getNetwork(p.targetNetwork) + mgntIP := targetNet.mgntIPv4 + clustersubnet := clusterSubnetIPv4Str + if utilnet.IsIPv6(p.hostInfCIDR.IP) { + mgntIP = targetNet.mgntIPv6 + clustersubnet = clusterSubnetIPv6Str + } + err = mgr.AddHostCIDRPolicy(node, mgntIP, clustersubnet) + if err != nil { + t.Fatal(fmt.Errorf("test: \"%s\" encountered error: %v", tt.desc, err)) + } + } + matcher := libovsdbtest.HaveData(tt.expectedDB) + success, err := matcher.Match(nbdbClient) + if !success { + t.Fatal(fmt.Errorf("test: \"%s\" didn't match expected with actual, err: %v", tt.desc, matcher.FailureMessage(nbdbClient))) + } + if err != nil { + t.Fatal(fmt.Errorf("test: \"%s\" encountered error: %v", tt.desc, err)) + } + }) + } +} diff --git a/go-controller/pkg/ovn/healthcheck/egressip_healthcheck.go b/go-controller/pkg/ovn/healthcheck/egressip_healthcheck.go index 210b666c95..a8a667d710 100644 --- a/go-controller/pkg/ovn/healthcheck/egressip_healthcheck.go +++ b/go-controller/pkg/ovn/healthcheck/egressip_healthcheck.go @@ -154,6 +154,8 @@ func (ehc *egressIPHealthClient) Connect(dialCtx context.Context, mgmtIPs []net. var nodeAddr string var err error + // Ignore SA1019, PR#4286 should refactor this. + // nolint:staticcheck opts := []grpc.DialOption{ grpc.WithBlock(), grpc.WithContextDialer(func(ctx context.Context, s string) (net.Conn, error) { @@ -175,6 +177,8 @@ func (ehc *egressIPHealthClient) Connect(dialCtx context.Context, mgmtIPs []net. } for _, nodeMgmtIP := range mgmtIPs { nodeAddr = net.JoinHostPort(nodeMgmtIP.String(), strconv.Itoa(healthCheckPort)) + // Ignore SA1019, PR#4286 should refactor this. + //nolint:staticcheck conn, err = grpc.DialContext(dialCtx, nodeAddr, opts...)
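As an illustrative footnote to the SA1019 suppressions above: a minimal sketch of the replacement they point toward, assuming grpc-go >= 1.63 (the target address and credential choice are hypothetical, not taken from the patch):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// grpc.NewClient replaces the deprecated grpc.DialContext and does not
	// support WithBlock: it returns immediately and connects lazily.
	conn, err := grpc.NewClient("127.0.0.1:9107", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		fmt.Println("client setup failed:", err)
		return
	}
	defer conn.Close()
	conn.Connect() // explicitly kick off connection establishment
	fmt.Println("initial state:", conn.GetState())
}
```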
if err == nil && conn != nil { break diff --git a/go-controller/pkg/ovn/hybrid_test.go b/go-controller/pkg/ovn/hybrid_test.go index 0bdd719743..55f781637e 100644 --- a/go-controller/pkg/ovn/hybrid_test.go +++ b/go-controller/pkg/ovn/hybrid_test.go @@ -37,7 +37,7 @@ import ( ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) @@ -212,7 +212,7 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) clusterController, err := NewOvnController(fakeClient.GetMasterClientset(), f, stopChan, nil, libovsdbOvnNBClient, libovsdbOvnSBClient, - record.NewFakeRecorder(10), wg) + record.NewFakeRecorder(10), wg, nil, NewPortCache(stopChan)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) c, cancel := context.WithCancel(ctx.Context) defer cancel() @@ -370,7 +370,7 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { expectedDatabaseState = addNodeLogicalFlows(expectedDatabaseState, expectedOVNClusterRouter, expectedNodeSwitch, expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1) clusterController, err := NewOvnController(fakeClient.GetMasterClientset(), f, stopChan, nil, libovsdbOvnNBClient, libovsdbOvnSBClient, - record.NewFakeRecorder(10), wg) + record.NewFakeRecorder(10), wg, nil, NewPortCache(stopChan)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) setupCOPP := true @@ -667,7 +667,7 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) clusterController, err := NewOvnController(fakeClient.GetMasterClientset(), f, stopChan, nil, libovsdbOvnNBClient, libovsdbOvnSBClient, - record.NewFakeRecorder(10), wg) + record.NewFakeRecorder(10), wg, nil, NewPortCache(stopChan)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) setupCOPP := true @@ -839,7 +839,7 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { expectedDatabaseState = addNodeLogicalFlows(expectedDatabaseState, expectedOVNClusterRouter, expectedNodeSwitch, expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1) clusterController, err := NewOvnController(fakeClient.GetMasterClientset(), f, stopChan, nil, libovsdbOvnNBClient, libovsdbOvnSBClient, - record.NewFakeRecorder(10), wg) + record.NewFakeRecorder(10), wg, nil, NewPortCache(stopChan)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) setupCOPP := true @@ -1123,7 +1123,7 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) clusterController, err := NewOvnController(fakeClient.GetMasterClientset(), f, stopChan, nil, libovsdbOvnNBClient, libovsdbOvnSBClient, - record.NewFakeRecorder(10), wg) + record.NewFakeRecorder(10), wg, nil, NewPortCache(stopChan)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) setupCOPP := true @@ -1325,7 +1325,7 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) clusterController, err := NewOvnController(fakeClient.GetMasterClientset(), f, stopChan, nil, libovsdbOvnNBClient, libovsdbOvnSBClient, - record.NewFakeRecorder(10), wg) + record.NewFakeRecorder(10), wg, nil, NewPortCache(stopChan)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) setupCOPP := true @@ -1519,7 +1519,7 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { expectedDatabaseState = 
addNodeLogicalFlows(expectedDatabaseState, expectedOVNClusterRouter, expectedNodeSwitch, expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1) clusterController, err := NewOvnController(fakeClient, f, stopChan, nil, libovsdbOvnNBClient, libovsdbOvnSBClient, - record.NewFakeRecorder(10), wg) + record.NewFakeRecorder(10), wg, nil, NewPortCache(stopChan)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) setupCOPP := true @@ -1741,7 +1741,7 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) clusterController, err := NewOvnController(fakeClient, f, stopChan, nil, libovsdbOvnNBClient, libovsdbOvnSBClient, - record.NewFakeRecorder(10), wg) + record.NewFakeRecorder(10), wg, nil, NewPortCache(stopChan)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) setupCOPP := true diff --git a/go-controller/pkg/ovn/kubevirt_test.go b/go-controller/pkg/ovn/kubevirt_test.go index 98f1a7ba00..3c78300c55 100644 --- a/go-controller/pkg/ovn/kubevirt_test.go +++ b/go-controller/pkg/ovn/kubevirt_test.go @@ -7,8 +7,7 @@ import ( "strings" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" kubevirtv1 "kubevirt.io/api/core/v1" @@ -27,6 +26,7 @@ import ( corev1 "k8s.io/api/core/v1" kapi "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" + kapierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ktypes "k8s.io/apimachinery/pkg/types" ) @@ -272,13 +272,14 @@ var _ = Describe("OVN Kubevirt Operations", func() { ComposeDHCPv4Options = func(uuid, namespace string, t *testDHCPOptions) *nbdb.DHCPOptions { dhcpOptions := kubevirt.ComposeDHCPv4Options( t.cidr, - t.dns, DefaultNetworkControllerName, ktypes.NamespacedName{ Namespace: namespace, Name: t.hostname, }, ) + dhcpOptions.Options["dns_server"] = t.dns + dhcpOptions.Options["router"] = kubevirt.ARPProxyIPv4 dhcpOptions.UUID = uuid return dhcpOptions @@ -286,7 +287,6 @@ var _ = Describe("OVN Kubevirt Operations", func() { ComposeDHCPv6Options = func(uuid, namespace string, t *testDHCPOptions) *nbdb.DHCPOptions { dhcpOptions := kubevirt.ComposeDHCPv6Options( t.cidr, - t.dns, DefaultNetworkControllerName, ktypes.NamespacedName{ Namespace: namespace, @@ -294,6 +294,7 @@ var _ = Describe("OVN Kubevirt Operations", func() { }, ) dhcpOptions.UUID = uuid + dhcpOptions.Options["dns_server"] = t.dns return dhcpOptions } composePolicy = func(uuid string, p testPolicy, t testData) *nbdb.LogicalRouterPolicy { @@ -307,7 +308,7 @@ var _ = Describe("OVN Kubevirt Operations", func() { Action: nbdb.LogicalRouterPolicyActionReroute, Nexthops: []string{p.nexthop}, ExternalIDs: externalIDs(t.namespace, vmName, kubevirt.OvnLocalZone), - Priority: ovntypes.EgressLiveMigrationReroutePiority, + Priority: ovntypes.EgressLiveMigrationReroutePriority, } } composeStaticRoute = func(uuid string, r testStaticRoute, t testData) *nbdb.LogicalRouterStaticRoute { @@ -712,8 +713,8 @@ var _ = Describe("OVN Kubevirt Operations", func() { podToCreate.Labels = t.migrationTarget.labels podToCreate.Annotations = t.migrationTarget.annotations } - pod, _ := fakeOvn.fakeClient.KubeClient.CoreV1().Pods(t.namespace).Get(context.TODO(), podToCreate.Name, metav1.GetOptions{}) - Expect(pod).To(BeNil()) + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(t.namespace).Get(context.TODO(), podToCreate.Name, metav1.GetOptions{}) + Expect(err).To(MatchError(kapierrors.IsNotFound, "IsNotFound")) podToCreate.CreationTimestamp = 
metav1.NewTime(time.Now()) _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(t.namespace).Create(context.TODO(), podToCreate, metav1.CreateOptions{}) diff --git a/go-controller/pkg/ovn/logical_switch_manager/logical_switch_manager.go b/go-controller/pkg/ovn/logical_switch_manager/logical_switch_manager.go index dfcaca4258..8f4c7b3391 100644 --- a/go-controller/pkg/ovn/logical_switch_manager/logical_switch_manager.go +++ b/go-controller/pkg/ovn/logical_switch_manager/logical_switch_manager.go @@ -97,7 +97,7 @@ func (manager *LogicalSwitchManager) AllocateUntilFull(switchName string) error // AllocateIPs will block off IPs in the ipnets slice as already allocated // for a given switch func (manager *LogicalSwitchManager) AllocateIPs(switchName string, ipnets []*net.IPNet) error { - return manager.allocator.AllocateIPs(switchName, ipnets) + return manager.allocator.AllocateIPPerSubnet(switchName, ipnets) } // AllocateNextIPs allocates IP addresses from each of the host subnets diff --git a/go-controller/pkg/ovn/logical_switch_manager/logical_switch_manager_suite_test.go b/go-controller/pkg/ovn/logical_switch_manager/logical_switch_manager_suite_test.go index 0c9dc01e18..35ada37897 100644 --- a/go-controller/pkg/ovn/logical_switch_manager/logical_switch_manager_suite_test.go +++ b/go-controller/pkg/ovn/logical_switch_manager/logical_switch_manager_suite_test.go @@ -3,7 +3,7 @@ package logicalswitchmanager import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/ovn/logical_switch_manager/logical_switch_manager_test.go b/go-controller/pkg/ovn/logical_switch_manager/logical_switch_manager_test.go index f4ec9feb56..157d56acc0 100644 --- a/go-controller/pkg/ovn/logical_switch_manager/logical_switch_manager_test.go +++ b/go-controller/pkg/ovn/logical_switch_manager/logical_switch_manager_test.go @@ -10,7 +10,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/ovn/master.go b/go-controller/pkg/ovn/master.go index 87ec5db269..e346bee98d 100644 --- a/go-controller/pkg/ovn/master.go +++ b/go-controller/pkg/ovn/master.go @@ -18,7 +18,6 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" - libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" @@ -48,67 +47,13 @@ func (oc *DefaultNetworkController) SetupMaster(existingNodeNames []string) erro return err } - pgIDs := oc.getClusterPortGroupDbIDs(types.ClusterPortGroupNameBase) - pg := &nbdb.PortGroup{ - Name: libovsdbutil.GetPortGroupName(pgIDs), - } - pg, err = libovsdbops.GetPortGroup(oc.nbClient, pg) - if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { + if err := oc.setupClusterPortGroups(); err != nil { return err } - if pg == nil { - // we didn't find an existing clusterPG, let's create a new empty PG (fresh cluster install) - // Create a cluster-wide port group that all logical switch ports are part of - pg := libovsdbutil.BuildPortGroup(pgIDs, nil, nil) - err = 
libovsdbops.CreateOrUpdatePortGroups(oc.nbClient, pg) - if err != nil { - klog.Errorf("Failed to create cluster port group: %v", err) - return err - } - } - pgIDs = oc.getClusterPortGroupDbIDs(types.ClusterRtrPortGroupNameBase) - pg = &nbdb.PortGroup{ - Name: libovsdbutil.GetPortGroupName(pgIDs), - } - pg, err = libovsdbops.GetPortGroup(oc.nbClient, pg) - if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { + if err := oc.syncDefaultMulticastPolicies(); err != nil { return err } - if pg == nil { - // we didn't find an existing clusterRtrPG, let's create a new empty PG (fresh cluster install) - // Create a cluster-wide port group with all node-to-cluster router - // logical switch ports. Currently the only user is multicast but it might - // be used for other features in the future. - pg = libovsdbutil.BuildPortGroup(pgIDs, nil, nil) - err = libovsdbops.CreateOrUpdatePortGroups(oc.nbClient, pg) - if err != nil { - klog.Errorf("Failed to create cluster port group: %v", err) - return err - } - } - - // If supported, enable IGMP relay on the router to forward multicast - // traffic between nodes. - if oc.multicastSupport { - // Drop IP multicast globally. Multicast is allowed only if explicitly - // enabled in a namespace. - if err := oc.createDefaultDenyMulticastPolicy(); err != nil { - klog.Errorf("Failed to create default deny multicast policy, error: %v", err) - return err - } - - // Allow IP multicast from node switch to cluster router and from - // cluster router to node switch. - if err := oc.createDefaultAllowMulticastPolicy(); err != nil { - klog.Errorf("Failed to create default deny multicast policy, error: %v", err) - return err - } - } else { - if err = oc.disableMulticast(); err != nil { - return fmt.Errorf("failed to delete default multicast policy, error: %v", err) - } - } // Create OVNJoinSwitch that will be used to connect gateway routers to the distributed router. 
return oc.gatewayTopologyFactory.NewJoinSwitch(logicalRouter, oc.NetInfo, oc.ovnClusterLRPToJoinIfAddrs) @@ -130,7 +75,7 @@ func (oc *DefaultNetworkController) newClusterRouter() (*nbdb.LogicalRouter, err } func (oc *DefaultNetworkController) syncNodeManagementPortDefault(node *kapi.Node, switchName string, hostSubnets []*net.IPNet) error { - mgmtPortIPs, err := oc.syncNodeManagementPortRouteHostSubnets(node, switchName, hostSubnets) + mgmtPortIPs, err := oc.syncNodeManagementPort(node, switchName, oc.GetNetworkScopedClusterRouterName(), hostSubnets) if err == nil { return oc.setupUDNACLs(mgmtPortIPs) } @@ -829,6 +774,9 @@ func (oc *DefaultNetworkController) deleteHoNodeEvent(node *kapi.Node) error { func (oc *DefaultNetworkController) addIPToHostNetworkNamespaceAddrSet(node *kapi.Node) error { var hostNetworkPolicyIPs []net.IP + if util.NoHostSubnet(node) { + return nil + } hostNetworkPolicyIPs, err := oc.getHostNamespaceAddressesForNode(node) if err != nil { parsedErr := err diff --git a/go-controller/pkg/ovn/master_test.go b/go-controller/pkg/ovn/master_test.go index 48161f13ab..c87d4173c5 100644 --- a/go-controller/pkg/ovn/master_test.go +++ b/go-controller/pkg/ovn/master_test.go @@ -12,7 +12,7 @@ import ( "testing" "time" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" libovsdbclient "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" @@ -1051,7 +1051,7 @@ var _ = ginkgo.Describe("Default network controller operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) recorder = record.NewFakeRecorder(10) - oc, err = NewOvnController(fakeClient, f, stopChan, nil, nbClient, sbClient, recorder, wg) + oc, err = NewOvnController(fakeClient, f, stopChan, nil, nbClient, sbClient, recorder, wg, nil, NewPortCache(stopChan)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(oc).NotTo(gomega.BeNil()) @@ -1834,22 +1834,32 @@ func newClusterJoinSwitch() *nbdb.LogicalSwitch { } } -func newClusterPortGroup() *nbdb.PortGroup { - fakeController := getFakeController(DefaultNetworkControllerName) +func newNetworkClusterPortGroup(netInfo util.NetInfo) *nbdb.PortGroup { + netControllerName := getNetworkControllerName(netInfo.GetNetworkName()) + fakeController := getFakeController(netControllerName) pgIDs := fakeController.getClusterPortGroupDbIDs(types.ClusterPortGroupNameBase) pg := libovsdbutil.BuildPortGroup(pgIDs, nil, nil) pg.UUID = pgIDs.String() return pg } -func newRouterPortGroup() *nbdb.PortGroup { - fakeController := getFakeController(DefaultNetworkControllerName) +func newNetworkRouterPortGroup(netInfo util.NetInfo) *nbdb.PortGroup { + netControllerName := getNetworkControllerName(netInfo.GetNetworkName()) + fakeController := getFakeController(netControllerName) pgIDs := fakeController.getClusterPortGroupDbIDs(types.ClusterRtrPortGroupNameBase) pg := libovsdbutil.BuildPortGroup(pgIDs, nil, nil) pg.UUID = pgIDs.String() return pg } +func newClusterPortGroup() *nbdb.PortGroup { + return newNetworkClusterPortGroup(&util.DefaultNetInfo{}) +} + +func newRouterPortGroup() *nbdb.PortGroup { + return newNetworkRouterPortGroup(&util.DefaultNetInfo{}) +} + func newOVNClusterRouter() *nbdb.LogicalRouter { return &nbdb.LogicalRouter{ UUID: types.OVNClusterRouter + "-UUID", @@ -2051,7 +2061,7 @@ func TestController_syncNodes(t *testing.T) { nbClient, sbClient, record.NewFakeRecorder(0), - wg) + wg, nil, NewPortCache(stopChan)) gomega.Expect(err).ToNot(gomega.HaveOccurred()) err = 
controller.syncNodes([]interface{}{&testNode}) if err != nil { @@ -2152,7 +2162,7 @@ func TestController_deleteStaleNodeChassis(t *testing.T) { nbClient, sbClient, record.NewFakeRecorder(0), - wg) + wg, nil, NewPortCache(stopChan)) gomega.Expect(err).ToNot(gomega.HaveOccurred()) err = controller.deleteStaleNodeChassis(&tt.node) diff --git a/go-controller/pkg/ovn/multicast_test.go b/go-controller/pkg/ovn/multicast_test.go index a161bb5b13..5ba004d581 100644 --- a/go-controller/pkg/ovn/multicast_test.go +++ b/go-controller/pkg/ovn/multicast_test.go @@ -2,60 +2,35 @@ package ovn import ( "context" + "fmt" - "github.com/onsi/ginkgo" - "github.com/onsi/gomega" - "github.com/onsi/gomega/format" "github.com/urfave/cli/v2" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/onsi/gomega/format" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" + ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + nadapi "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) -type ipMode struct { - IPv4Mode bool - IPv6Mode bool -} - -// FIXME DUAL-STACK: FakeOVN doesn't really support adding more than one -// pod to the namespace. All logical ports would share the same fakeUUID. -// When this is addressed we can add an entry for -// IPv4Mode = true, IPv6Mode = true. 
-func getIpModes() []ipMode { - return []ipMode{ - {true, false}, - {false, true}, - } -} - -func ipModeStr(m ipMode) string { - if m.IPv4Mode && m.IPv6Mode { - return "dualstack" - } else if m.IPv4Mode { - return "ipv4" - } else if m.IPv6Mode { - return "ipv6" - } else { - return "no IP mode set" - } -} - -func setIpMode(m ipMode) { - config.IPv4Mode = m.IPv4Mode - config.IPv6Mode = m.IPv6Mode -} - -func getMulticastDefaultExpectedData(clusterPortGroup, clusterRtrPortGroup *nbdb.PortGroup) []libovsdb.TestData { +func getMulticastExpectedData(netInfo util.NetInfo, clusterPortGroup, clusterRtrPortGroup *nbdb.PortGroup) []libovsdb.TestData { + netControllerName := getNetworkControllerName(netInfo.GetNetworkName()) match := getMulticastACLMatch() - aclIDs := getDefaultMcastACLDbIDs(mcastDefaultDenyID, libovsdbutil.ACLEgress, DefaultNetworkControllerName) + aclIDs := getDefaultMcastACLDbIDs(mcastDefaultDenyID, libovsdbutil.ACLEgress, netControllerName) aclName := libovsdbutil.GetACLName(aclIDs) defaultDenyEgressACL := libovsdbops.BuildACL( aclName, @@ -74,7 +49,7 @@ func getMulticastDefaultExpectedData(clusterPortGroup, clusterRtrPortGroup *nbdb ) defaultDenyEgressACL.UUID = "defaultDenyEgressACL_UUID" - aclIDs = getDefaultMcastACLDbIDs(mcastDefaultDenyID, libovsdbutil.ACLIngress, DefaultNetworkControllerName) + aclIDs = getDefaultMcastACLDbIDs(mcastDefaultDenyID, libovsdbutil.ACLIngress, netControllerName) aclName = libovsdbutil.GetACLName(aclIDs) defaultDenyIngressACL := libovsdbops.BuildACL( aclName, @@ -92,7 +67,7 @@ func getMulticastDefaultExpectedData(clusterPortGroup, clusterRtrPortGroup *nbdb defaultDenyIngressACL.UUID = "defaultDenyIngressACL_UUID" clusterPortGroup.ACLs = []string{defaultDenyEgressACL.UUID, defaultDenyIngressACL.UUID} - aclIDs = getDefaultMcastACLDbIDs(mcastAllowInterNodeID, libovsdbutil.ACLEgress, DefaultNetworkControllerName) + aclIDs = getDefaultMcastACLDbIDs(mcastAllowInterNodeID, libovsdbutil.ACLEgress, netControllerName) aclName = libovsdbutil.GetACLName(aclIDs) egressMatch := libovsdbutil.GetACLMatch(clusterRtrPortGroup.Name, match, libovsdbutil.ACLEgress) defaultAllowEgressACL := libovsdbops.BuildACL( @@ -112,7 +87,7 @@ func getMulticastDefaultExpectedData(clusterPortGroup, clusterRtrPortGroup *nbdb ) defaultAllowEgressACL.UUID = "defaultAllowEgressACL_UUID" - aclIDs = getDefaultMcastACLDbIDs(mcastAllowInterNodeID, libovsdbutil.ACLIngress, DefaultNetworkControllerName) + aclIDs = getDefaultMcastACLDbIDs(mcastAllowInterNodeID, libovsdbutil.ACLIngress, netControllerName) aclName = libovsdbutil.GetACLName(aclIDs) ingressMatch := libovsdbutil.GetACLMatch(clusterRtrPortGroup.Name, match, libovsdbutil.ACLIngress) defaultAllowIngressACL := libovsdbops.BuildACL( @@ -140,8 +115,8 @@ func getMulticastDefaultExpectedData(clusterPortGroup, clusterRtrPortGroup *nbdb } } -func getMulticastDefaultStaleData(clusterPortGroup, clusterRtrPortGroup *nbdb.PortGroup) []libovsdb.TestData { - testData := getMulticastDefaultExpectedData(clusterPortGroup, clusterRtrPortGroup) +func getMulticastStaleData(netInfo util.NetInfo, clusterPortGroup, clusterRtrPortGroup *nbdb.PortGroup) []libovsdb.TestData { + testData := getMulticastExpectedData(netInfo, clusterPortGroup, clusterRtrPortGroup) defaultDenyIngressACL := testData[0].(*nbdb.ACL) newName := libovsdbutil.JoinACLName(types.ClusterPortGroupNameBase, "DefaultDenyMulticastIngress") defaultDenyIngressACL.Name = &newName @@ -172,22 +147,17 @@ func getMulticastDefaultStaleData(clusterPortGroup, clusterRtrPortGroup *nbdb.Po } } -func 
getDefaultPortGroups() (clusterPortGroup, clusterRtrPortGroup *nbdb.PortGroup) { - clusterPortGroup = newClusterPortGroup() - clusterRtrPortGroup = newRouterPortGroup() - return -} - -func getMulticastPolicyExpectedData(ns string, ports []string) []libovsdb.TestData { - fakeController := getFakeController(DefaultNetworkControllerName) +func getMulticastPolicyExpectedData(netInfo util.NetInfo, ns string, ports []string) []libovsdb.TestData { + netControllerName := getNetworkControllerName(netInfo.GetNetworkName()) + fakeController := getFakeController(netControllerName) pg_hash := fakeController.getNamespacePortGroupName(ns) egressMatch := libovsdbutil.GetACLMatch(pg_hash, fakeController.getMulticastACLEgrMatch(), libovsdbutil.ACLEgress) - ip4AddressSet, ip6AddressSet := getNsAddrSetHashNames(ns) + ip4AddressSet, ip6AddressSet := getNsAddrSetHashNames(netControllerName, ns) mcastMatch := getACLMatchAF(getMulticastACLIgrMatchV4(ip4AddressSet), getMulticastACLIgrMatchV6(ip6AddressSet), config.IPv4Mode, config.IPv6Mode) ingressMatch := libovsdbutil.GetACLMatch(pg_hash, mcastMatch, libovsdbutil.ACLIngress) - aclIDs := getNamespaceMcastACLDbIDs(ns, libovsdbutil.ACLEgress, DefaultNetworkControllerName) + aclIDs := getNamespaceMcastACLDbIDs(ns, libovsdbutil.ACLEgress, netControllerName) aclName := libovsdbutil.GetACLName(aclIDs) egressACL := libovsdbops.BuildACL( aclName, @@ -206,7 +176,7 @@ func getMulticastPolicyExpectedData(ns string, ports []string) []libovsdb.TestDa ) egressACL.UUID = ns + "mc-egress-UUID" - aclIDs = getNamespaceMcastACLDbIDs(ns, libovsdbutil.ACLIngress, DefaultNetworkControllerName) + aclIDs = getNamespaceMcastACLDbIDs(ns, libovsdbutil.ACLIngress, netControllerName) aclName = libovsdbutil.GetACLName(aclIDs) ingressACL := libovsdbops.BuildACL( aclName, @@ -250,8 +220,8 @@ func getNamespacePG(ns, controllerName string) *nbdb.PortGroup { return pg } -func getMulticastPolicyStaleData(ns string, ports []string) []libovsdb.TestData { - testData := getMulticastPolicyExpectedData(ns, ports) +func getMulticastPolicyStaleData(netInfo util.NetInfo, ns string, ports []string) []libovsdb.TestData { + testData := getMulticastPolicyExpectedData(netInfo, ns, ports) egressACL := testData[0].(*nbdb.ACL) newName := libovsdbutil.JoinACLName(ns, "MulticastAllowEgress") @@ -270,16 +240,39 @@ func getMulticastPolicyStaleData(ns string, ports []string) []libovsdb.TestData } } -func getNodeSwitch(nodeName string) []libovsdb.TestData { +func getNetInfoFromNAD(nad *nadapi.NetworkAttachmentDefinition) util.NetInfo { + if nad == nil { + return &util.DefaultNetInfo{} + } + netInfo, err := util.ParseNADInfo(nad) + Expect(err).NotTo(HaveOccurred()) + return netInfo +} + +func getNodeData(netInfo util.NetInfo, nodeName string) []libovsdb.TestData { + switchName := netInfo.GetNetworkScopedSwitchName(nodeName) return []libovsdb.TestData{ &nbdb.LogicalSwitch{ - UUID: nodeName + "_UUID", - Name: nodeName, + UUID: switchName + "_UUID", + Name: switchName, }, } } -func createTestPods(nodeName, namespace string, ipM ipMode) (pods []v1.Pod, tPods []testPod, tPodIPs []string) { +func newNodeWithNad(nad *nadapi.NetworkAttachmentDefinition, networkName string) *v1.Node { + n := newNode(nodeName, "192.168.126.202/24") + if nad != nil { + n.Annotations["k8s.ovn.org/node-subnets"] = fmt.Sprintf("{\"default\":\"192.168.126.202/24\", \"%s\":\"192.168.127.202/24\"}", networkName) + n.Annotations["k8s.ovn.org/network-ids"] = fmt.Sprintf("{\"default\":\"0\",\"%s\":\"50\"}", networkName) + 
n.Annotations["k8s.ovn.org/node-mgmt-port-mac-addresses"] = fmt.Sprintf("{\"default\":\"96:8f:e8:25:a2:e5\",\"%s\":\"d6:bc:85:32:30:fb\"}", networkName) + n.Annotations["k8s.ovn.org/node-chassis-id"] = "abdcef" + n.Annotations["k8s.ovn.org/l3-gateway-config"] = "{\"default\":{\"mac-address\":\"52:54:00:e2:ed:d0\",\"ip-addresses\":[\"10.1.1.10/24\"],\"ip-address\":\"10.1.1.10/24\",\"next-hops\":[\"10.1.1.1\"],\"next-hop\":\"10.1.1.1\"}}" + n.Annotations["k8s.ovn.org/node-gateway-router-lrp-ifaddrs"] = fmt.Sprintf("{\"default\":{\"ipv4\":\"100.64.0.4/16\"},\"%s\":{\"ipv4\":\"100.65.0.4/16\"}}", networkName) + } + return n +} + +func createTestPods(nodeName, namespace string, useIPv4, useIPv6 bool) (pods []v1.Pod, tPods []testPod, tPodIPs []string) { nPodTestV4 := newTPod( nodeName, "10.128.1.0/24", @@ -300,11 +293,11 @@ func createTestPods(nodeName, namespace string, ipM ipMode) (pods []v1.Pod, tPod "0a:58:dd:33:05:d8", namespace, ) - if ipM.IPv4Mode { + if useIPv4 { tPods = append(tPods, nPodTestV4) tPodIPs = append(tPodIPs, nPodTestV4.podIP) } - if ipM.IPv6Mode { + if useIPv6 { tPods = append(tPods, nPodTestV6) tPodIPs = append(tPodIPs, nPodTestV6.podIP) } @@ -321,26 +314,57 @@ func updateMulticast(fakeOvn *FakeOVN, ns *v1.Namespace, enable bool) { ns.Annotations[util.NsMulticastAnnotation] = "false" } _, err := fakeOvn.fakeClient.KubeClient.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + Expect(err).NotTo(HaveOccurred()) +} + +func startBaseNetworkController(fakeOvn *FakeOVN, nad *nadapi.NetworkAttachmentDefinition) (*BaseNetworkController, *addressset.FakeAddressSetFactory) { + if nad != nil { + netInfo, err := util.ParseNADInfo(nad) + Expect(err).ToNot(HaveOccurred()) + Expect(fakeOvn.NewSecondaryNetworkController(nad)).To(Succeed()) + controller, ok := fakeOvn.secondaryControllers[netInfo.GetNetworkName()] + Expect(ok).To(BeTrue()) + return &controller.bnc.BaseNetworkController, controller.asf + } else { + return &fakeOvn.controller.BaseNetworkController, fakeOvn.asf + } } -var _ = ginkgo.Describe("OVN Multicast with IP Address Family", func() { +var _ = Describe("OVN Multicast with IP Address Family", func() { const ( namespaceName1 = "namespace1" - nodeName = "node1" ) + var ( app *cli.App fakeOvn *FakeOVN gomegaFormatMaxLength int + networkName = "bluenet" + nadName = "rednad" + + nadFromIPMode = func(useIPv4, useIPv6 bool) *nadapi.NetworkAttachmentDefinition { + if useIPv4 && useIPv6 { + return ovntest.GenerateNAD(networkName, nadName, namespaceName1, + types.Layer3Topology, "100.128.0.0/16,ae70::66/60", types.NetworkRolePrimary) + } else if useIPv4 { + return ovntest.GenerateNAD(networkName, nadName, namespaceName1, + types.Layer3Topology, "100.128.0.0/16", types.NetworkRolePrimary) + } else { + return ovntest.GenerateNAD(networkName, nadName, namespaceName1, + types.Layer3Topology, "ae70::66/60", types.NetworkRolePrimary) + } + } ) - ginkgo.BeforeEach(func() { + BeforeEach(func() { // Restore global default values before each testcase config.PrepareTestConfig() - config.IPv4Mode = true - config.IPv6Mode = false config.EnableMulticast = true + config.OVNKubernetesFeature.EnableNetworkSegmentation = true + config.OVNKubernetesFeature.EnableMultiNetwork = true + config.OVNKubernetesFeature.EnableMultiNetworkPolicy = true + config.Gateway.V6MasqueradeSubnet = "fd69::/112" + config.Gateway.V4MasqueradeSubnet = "169.254.0.0/17" app = cli.NewApp() app.Name = "test" @@ -354,60 +378,88 @@ var _ = 
ginkgo.Describe("OVN Multicast with IP Address Family", func() { format.MaxLength = 0 }) - ginkgo.AfterEach(func() { + AfterEach(func() { fakeOvn.shutdown() format.MaxLength = gomegaFormatMaxLength }) - ginkgo.Context("on startup", func() { - ginkgo.It("creates default Multicast ACLs", func() { + Context("on startup", func() { + DescribeTable("creates default Multicast ACLs", func(useIPv4, useIPv6 bool, nad *nadapi.NetworkAttachmentDefinition) { app.Action = func(ctx *cli.Context) error { - clusterPortGroup, clusterRtrPortGroup := getDefaultPortGroups() + config.IPv4Mode = useIPv4 + config.IPv6Mode = useIPv6 + + netInfo := getNetInfoFromNAD(nad) + clusterPortGroup := newNetworkClusterPortGroup(netInfo) + clusterRtrPortGroup := newNetworkRouterPortGroup(netInfo) fakeOvn.startWithDBSetup(libovsdb.TestSetup{ NBData: []libovsdb.TestData{ clusterPortGroup, clusterRtrPortGroup, }, }) - // this is "if oc.multicastSupport" part of SetupMaster - err := fakeOvn.controller.createDefaultDenyMulticastPolicy() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = fakeOvn.controller.createDefaultAllowMulticastPolicy() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - gomega.Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData( - getMulticastDefaultExpectedData(clusterPortGroup, clusterRtrPortGroup))) + bnc, _ := startBaseNetworkController(fakeOvn, nad) + + Expect(bnc.createDefaultDenyMulticastPolicy()).To(Succeed()) + Expect(bnc.createDefaultAllowMulticastPolicy()).To(Succeed()) + + Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData( + getMulticastExpectedData(netInfo, clusterPortGroup, clusterRtrPortGroup))) return nil } + err := app.Run([]string{app.Name}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - ginkgo.It("updates stale default Multicast ACLs", func() { + Expect(err).NotTo(HaveOccurred()) + }, + Entry("IPv4", true, false, nil), + Entry("IPv6", false, true, nil), + Entry("[Network Segmentation] IPv4", true, false, nadFromIPMode(true, false)), + Entry("[Network Segmentation] IPv6", false, true, nadFromIPMode(false, true)), + ) + + DescribeTable("updates stale default Multicast ACLs", func(useIPv4, useIPv6 bool, nad *nadapi.NetworkAttachmentDefinition) { app.Action = func(ctx *cli.Context) error { + config.IPv4Mode = useIPv4 + config.IPv6Mode = useIPv6 + // start with stale ACLs - clusterPortGroup, clusterRtrPortGroup := getDefaultPortGroups() + netInfo := getNetInfoFromNAD(nad) + clusterPortGroup := newNetworkClusterPortGroup(netInfo) + clusterRtrPortGroup := newNetworkRouterPortGroup(netInfo) fakeOvn.startWithDBSetup(libovsdb.TestSetup{ - NBData: getMulticastDefaultStaleData(clusterPortGroup, clusterRtrPortGroup), + NBData: getMulticastStaleData(netInfo, clusterPortGroup, clusterRtrPortGroup), }) - // this is "if oc.multicastSupport" part of SetupMaster - err := fakeOvn.controller.createDefaultDenyMulticastPolicy() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = fakeOvn.controller.createDefaultAllowMulticastPolicy() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + bnc, _ := startBaseNetworkController(fakeOvn, nad) + + Expect(bnc.createDefaultDenyMulticastPolicy()).To(Succeed()) + Expect(bnc.createDefaultAllowMulticastPolicy()).To(Succeed()) + // check acls are updated - gomega.Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData( - getMulticastDefaultExpectedData(clusterPortGroup, clusterRtrPortGroup))) + Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData( + getMulticastExpectedData(netInfo, clusterPortGroup, clusterRtrPortGroup))) return nil } + err := 
app.Run([]string{app.Name}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - ginkgo.It("cleans up Multicast resources when multicast is disabled", func() { + Expect(err).NotTo(HaveOccurred()) + }, + Entry("IPv4", true, false, nil), + Entry("IPv6", false, true, nil), + Entry("[Network Segmentation] IPv4", true, false, nadFromIPMode(true, false)), + Entry("[Network Segmentation] IPv6", false, true, nadFromIPMode(false, true)), + ) + + DescribeTable("cleans up Multicast resources when multicast is disabled", func(useIPv4, useIPv6 bool, nad *nadapi.NetworkAttachmentDefinition) { app.Action = func(ctx *cli.Context) error { - clusterPortGroup, clusterRtrPortGroup := getDefaultPortGroups() - initialData := getMulticastDefaultExpectedData(clusterPortGroup, clusterRtrPortGroup) + config.IPv4Mode = useIPv4 + config.IPv6Mode = useIPv6 + + netInfo := getNetInfoFromNAD(nad) + clusterPortGroup := newNetworkClusterPortGroup(netInfo) + clusterRtrPortGroup := newNetworkRouterPortGroup(netInfo) + initialData := getMulticastExpectedData(netInfo, clusterPortGroup, clusterRtrPortGroup) - nsData := getMulticastPolicyExpectedData(namespaceName1, nil) + nsData := getMulticastPolicyExpectedData(netInfo, namespaceName1, nil) initialData = append(initialData, nsData...) // namespace is still present, but multicast support is disabled namespace1 := *newNamespace(namespaceName1) @@ -418,27 +470,41 @@ var _ = ginkgo.Describe("OVN Multicast with IP Address Family", func() { }, }, ) + bnc, _ := startBaseNetworkController(fakeOvn, nad) + // this "if !oc.multicastSupport" part of SetupMaster - err := fakeOvn.controller.disableMulticast() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + Expect(bnc.disableMulticast()).To(Succeed()) // check acls are deleted when multicast is disabled - clusterPortGroup, clusterRtrPortGroup = getDefaultPortGroups() - namespacePortGroup := getNamespacePG(namespaceName1, fakeOvn.controller.controllerName) + clusterPortGroup = newNetworkClusterPortGroup(netInfo) + clusterRtrPortGroup = newNetworkRouterPortGroup(netInfo) + namespacePortGroup := getNamespacePG(namespaceName1, getNetworkControllerName(netInfo.GetNetworkName())) expectedData := []libovsdb.TestData{ clusterPortGroup, clusterRtrPortGroup, namespacePortGroup, } - gomega.Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedData)) + Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedData)) return nil } + err := app.Run([]string{app.Name}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - ginkgo.It("creates namespace Multicast ACLs", func() { + Expect(err).NotTo(HaveOccurred()) + }, + Entry("IPv4", true, false, nil), + Entry("IPv6", false, true, nil), + Entry("[Network Segmentation] IPv4", true, false, nadFromIPMode(true, false)), + Entry("[Network Segmentation] IPv6", false, true, nadFromIPMode(false, true)), + ) + + DescribeTable("creates namespace Multicast ACLs", func(useIPv4, useIPv6 bool, nad *nadapi.NetworkAttachmentDefinition) { app.Action = func(ctx *cli.Context) error { - clusterPortGroup, clusterRtrPortGroup := getDefaultPortGroups() - expectedData := getMulticastDefaultExpectedData(clusterPortGroup, clusterRtrPortGroup) + config.IPv4Mode = useIPv4 + config.IPv6Mode = useIPv6 + + netInfo := getNetInfoFromNAD(nad) + clusterPortGroup := newNetworkClusterPortGroup(netInfo) + clusterRtrPortGroup := newNetworkRouterPortGroup(netInfo) + expectedData := getMulticastExpectedData(netInfo, clusterPortGroup, clusterRtrPortGroup) // namespace exists, but multicast acls do not namespace1 := 
*newNamespace(namespaceName1) namespace1.Annotations[util.NsMulticastAnnotation] = "true" @@ -449,21 +515,34 @@ var _ = ginkgo.Describe("OVN Multicast with IP Address Family", func() { }, }, ) - err := fakeOvn.controller.WatchNamespaces() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - expectedData = append(expectedData, getMulticastPolicyExpectedData(namespaceName1, nil)...) - gomega.Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedData)) + bnc, _ := startBaseNetworkController(fakeOvn, nad) + + Expect(bnc.WatchNamespaces()).To(Succeed()) + expectedData = append(expectedData, getMulticastPolicyExpectedData(netInfo, namespaceName1, nil)...) + Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedData)) return nil } + err := app.Run([]string{app.Name}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - ginkgo.It("updates stale namespace Multicast ACLs", func() { + Expect(err).NotTo(HaveOccurred()) + }, + Entry("IPv4", true, false, nil), + Entry("IPv6", false, true, nil), + Entry("[Network Segmentation] IPv4", true, false, nadFromIPMode(true, false)), + Entry("[Network Segmentation] IPv6", false, true, nadFromIPMode(false, true)), + ) + + DescribeTable("updates stale namespace Multicast ACLs", func(useIPv4, useIPv6 bool, nad *nadapi.NetworkAttachmentDefinition) { app.Action = func(ctx *cli.Context) error { + config.IPv4Mode = useIPv4 + config.IPv6Mode = useIPv6 + // start with stale ACLs for existing namespace - clusterPortGroup, clusterRtrPortGroup := getDefaultPortGroups() - expectedData := getMulticastDefaultExpectedData(clusterPortGroup, clusterRtrPortGroup) - expectedData = append(expectedData, getMulticastPolicyStaleData(namespaceName1, nil)...) + netInfo := getNetInfoFromNAD(nad) + clusterPortGroup := newNetworkClusterPortGroup(netInfo) + clusterRtrPortGroup := newNetworkRouterPortGroup(netInfo) + expectedData := getMulticastExpectedData(netInfo, clusterPortGroup, clusterRtrPortGroup) + expectedData = append(expectedData, getMulticastPolicyStaleData(netInfo, namespaceName1, nil)...) namespace1 := *newNamespace(namespaceName1) namespace1.Annotations[util.NsMulticastAnnotation] = "true" fakeOvn.startWithDBSetup(libovsdb.TestSetup{NBData: expectedData}, @@ -473,25 +552,35 @@ var _ = ginkgo.Describe("OVN Multicast with IP Address Family", func() { }, }, ) + bnc, _ := startBaseNetworkController(fakeOvn, nad) - err := fakeOvn.controller.WatchNamespaces() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - expectedData = getMulticastDefaultExpectedData(clusterPortGroup, clusterRtrPortGroup) - expectedData = append(expectedData, getMulticastPolicyExpectedData(namespaceName1, nil)...) - gomega.Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedData)) + Expect(bnc.WatchNamespaces()).To(Succeed()) + expectedData = getMulticastExpectedData(netInfo, clusterPortGroup, clusterRtrPortGroup) + expectedData = append(expectedData, getMulticastPolicyExpectedData(netInfo, namespaceName1, nil)...) 
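+ // the stale ACLs should be reconciled into the expected ones in the NB DB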
+ Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedData)) return nil } err := app.Run([]string{app.Name}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - ginkgo.It("cleans up namespace Multicast ACLs when multicast is disabled for namespace", func() { + Expect(err).NotTo(HaveOccurred()) + }, + Entry("IPv4", true, false, nil), + Entry("IPv6", false, true, nil), + Entry("[Network Segmentation] IPv4", true, false, nadFromIPMode(true, false)), + Entry("[Network Segmentation] IPv6", false, true, nadFromIPMode(false, true)), + ) + + DescribeTable("cleans up namespace Multicast ACLs when multicast is disabled for namespace", func(useIPv4, useIPv6 bool, nad *nadapi.NetworkAttachmentDefinition) { app.Action = func(ctx *cli.Context) error { + config.IPv4Mode = useIPv4 + config.IPv6Mode = useIPv6 + // start with stale ACLs - clusterPortGroup, clusterRtrPortGroup := getDefaultPortGroups() - defaultMulticastData := getMulticastDefaultExpectedData(clusterPortGroup, clusterRtrPortGroup) - namespaceMulticastData := getMulticastPolicyExpectedData(namespaceName1, nil) + netInfo := getNetInfoFromNAD(nad) + clusterPortGroup := newNetworkClusterPortGroup(netInfo) + clusterRtrPortGroup := newNetworkRouterPortGroup(netInfo) + defaultMulticastData := getMulticastExpectedData(netInfo, clusterPortGroup, clusterRtrPortGroup) + namespaceMulticastData := getMulticastPolicyExpectedData(netInfo, namespaceName1, nil) namespace1 := *newNamespace(namespaceName1) fakeOvn.startWithDBSetup(libovsdb.TestSetup{NBData: append(defaultMulticastData, namespaceMulticastData...)}, @@ -501,233 +590,312 @@ var _ = ginkgo.Describe("OVN Multicast with IP Address Family", func() { }, }, ) + bnc, _ := startBaseNetworkController(fakeOvn, nad) - err := fakeOvn.controller.WatchNamespaces() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + Expect(bnc.WatchNamespaces()).To(Succeed()) // only namespaced acls should be dereferenced, default acls will stay - namespacePortGroup := getNamespacePG(namespaceName1, fakeOvn.controller.controllerName) + namespacePortGroup := getNamespacePG(namespaceName1, getNetworkControllerName(netInfo.GetNetworkName())) expectedData := append(defaultMulticastData, namespacePortGroup) - gomega.Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedData)) + Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedData)) return nil } + err := app.Run([]string{app.Name}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) + Expect(err).NotTo(HaveOccurred()) + }, + Entry("IPv4", true, false, nil), + Entry("IPv6", false, true, nil), + Entry("[Network Segmentation] IPv4", true, false, nadFromIPMode(true, false)), + Entry("[Network Segmentation] IPv6", false, true, nadFromIPMode(false, true)), + ) }) - ginkgo.Context("during execution", func() { - for _, m := range getIpModes() { - m := m - ginkgo.It("tests enabling/disabling multicast in a namespace "+ipModeStr(m), func() { - app.Action = func(ctx *cli.Context) error { - namespace1 := *newNamespace(namespaceName1) - - fakeOvn.startWithDBSetup(libovsdb.TestSetup{}, - &v1.NamespaceList{ - Items: []v1.Namespace{ - namespace1, - }, + Context("during execution", func() { + DescribeTable("tests enabling/disabling multicast in a namespace", func(useIPv4, useIPv6 bool, nad *nadapi.NetworkAttachmentDefinition) { + app.Action = func(ctx *cli.Context) error { + config.IPv4Mode = useIPv4 + config.IPv6Mode = useIPv6 + + netInfo := getNetInfoFromNAD(nad) + namespace1 := *newNamespace(namespaceName1) + fakeOvn.startWithDBSetup(libovsdb.TestSetup{}, + 
&v1.NamespaceList{ + Items: []v1.Namespace{ + namespace1, }, - ) - setIpMode(m) - - err := fakeOvn.controller.WatchNamespaces() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ns, err := fakeOvn.fakeClient.KubeClient.CoreV1().Namespaces().Get(context.TODO(), namespace1.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(ns).NotTo(gomega.BeNil()) - - // Multicast is denied by default. - _, ok := ns.Annotations[util.NsMulticastAnnotation] - gomega.Expect(ok).To(gomega.BeFalse()) - - // Enable multicast in the namespace. - updateMulticast(fakeOvn, ns, true) - expectedData := getMulticastPolicyExpectedData(namespace1.Name, nil) - gomega.Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedData...)) - - // Disable multicast in the namespace. - updateMulticast(fakeOvn, ns, false) - - namespacePortGroup := getNamespacePG(namespaceName1, fakeOvn.controller.controllerName) - gomega.Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(namespacePortGroup)) - return nil - } + }, + ) - err := app.Run([]string{app.Name}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) + bnc, _ := startBaseNetworkController(fakeOvn, nad) + Expect(bnc.WatchNamespaces()).To(Succeed()) - ginkgo.It("tests enabling multicast in a namespace with a pod "+ipModeStr(m), func() { - app.Action = func(ctx *cli.Context) error { - namespace1 := *newNamespace(namespaceName1) - pods, tPods, tPodIPs := createTestPods(nodeName, namespaceName1, m) + ns, err := fakeOvn.fakeClient.KubeClient.CoreV1().Namespaces().Get(context.TODO(), namespace1.Name, metav1.GetOptions{}) + Expect(err).To(Succeed()) + Expect(ns).NotTo(BeNil()) - fakeOvn.startWithDBSetup(libovsdb.TestSetup{NBData: getNodeSwitch(nodeName)}, - &v1.NamespaceList{ - Items: []v1.Namespace{ - namespace1, - }, - }, - &v1.NodeList{ - Items: []v1.Node{ - *newNode("node1", "192.168.126.202/24"), - }, + // Multicast is denied by default. + _, ok := ns.Annotations[util.NsMulticastAnnotation] + Expect(ok).To(BeFalse()) + + // Enable multicast in the namespace. + updateMulticast(fakeOvn, ns, true) + expectedData := getMulticastPolicyExpectedData(netInfo, namespace1.Name, nil) + Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedData...)) + + // Disable multicast in the namespace. 
+ updateMulticast(fakeOvn, ns, false) + + namespacePortGroup := getNamespacePG(namespaceName1, bnc.controllerName) + Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(namespacePortGroup)) + + return nil + } + + err := app.Run([]string{app.Name}) + Expect(err).NotTo(HaveOccurred()) + }, + Entry("IPv4", true, false, nil), + Entry("IPv6", false, true, nil), + Entry("[Network Segmentation] IPv4", true, false, nadFromIPMode(true, false)), + Entry("[Network Segmentation] IPv6", false, true, nadFromIPMode(false, true)), + ) + + DescribeTable("tests enabling multicast in a namespace with a pod", func(useIPv4, useIPv6 bool, nad *nadapi.NetworkAttachmentDefinition) { + app.Action = func(ctx *cli.Context) error { + config.IPv4Mode = useIPv4 + config.IPv6Mode = useIPv6 + + netInfo := getNetInfoFromNAD(nad) + node := newNodeWithNad(nad, networkName) + namespace1 := *newNamespace(namespaceName1) + pods, tPods, tPodIPs := createTestPods(nodeName, namespaceName1, useIPv4, useIPv6) + + objs := []runtime.Object{ + &v1.NamespaceList{ + Items: []v1.Namespace{ + namespace1, }, - &v1.PodList{ - Items: pods, + }, + &v1.NodeList{ + Items: []v1.Node{ + *node, }, - ) - setIpMode(m) - - for _, tPod := range tPods { - tPod.populateLogicalSwitchCache(fakeOvn) - } - - err := fakeOvn.controller.WatchNamespaces() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = fakeOvn.controller.WatchPods() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ns, err := fakeOvn.fakeClient.KubeClient.CoreV1().Namespaces().Get(context.TODO(), namespace1.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(ns).NotTo(gomega.BeNil()) - - // Enable multicast in the namespace - updateMulticast(fakeOvn, ns, true) - // calculate expected data - ports := []string{} - for _, tPod := range tPods { - ports = append(ports, tPod.portUUID) - } - expectedData := getMulticastPolicyExpectedData(namespace1.Name, ports) - expectedData = append(expectedData, getDefaultNetExpectedPodsAndSwitches(tPods, []string{nodeName})...) - gomega.Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedData...)) - fakeOvn.asf.ExpectAddressSetWithAddresses(namespace1.Name, tPodIPs) - return nil + }, + &v1.PodList{ + Items: pods, + }, + } + if nad != nil { + objs = append(objs, &nadapi.NetworkAttachmentDefinitionList{ + Items: []nadapi.NetworkAttachmentDefinition{*nad}, + }) } - err := app.Run([]string{app.Name}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - - ginkgo.It("tests enabling multicast in multiple namespaces with a long name > 42 characters "+ipModeStr(m), func() { - app.Action = func(ctx *cli.Context) error { - longNameSpace1Name := "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijk" // create with 63 characters - namespace1 := *newNamespace(longNameSpace1Name) - longNameSpace2Name := "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijl" // create with 63 characters - namespace2 := *newNamespace(longNameSpace2Name) - - fakeOvn.startWithDBSetup(libovsdb.TestSetup{NBData: getNodeSwitch(nodeName)}, - &v1.NamespaceList{ - Items: []v1.Namespace{ - namespace1, - namespace2, - }, + fakeOvn.startWithDBSetup(libovsdb.TestSetup{NBData: getNodeData(netInfo, nodeName)}, objs...) 
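+ // start the default network controller, or the secondary network controller when a NAD is provided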
+ bnc, asf := startBaseNetworkController(fakeOvn, nad) + + for _, tPod := range tPods { + tPod.populateControllerLogicalSwitchCache(bnc) + } + if nad != nil { + Expect(fakeOvn.controller.nadController.Start()).To(Succeed()) + } + + Expect(bnc.WatchNamespaces()).To(Succeed()) + Expect(bnc.WatchPods()).To(Succeed()) + ns, err := fakeOvn.fakeClient.KubeClient.CoreV1().Namespaces().Get(context.TODO(), namespace1.Name, metav1.GetOptions{}) + Expect(err).To(Succeed()) + Expect(ns).NotTo(BeNil()) + + // Enable multicast in the namespace + updateMulticast(fakeOvn, ns, true) + // calculate expected data + ports := []string{} + for _, tPod := range tPods { + ports = append(ports, tPod.portUUID) + } + expectedData := getMulticastPolicyExpectedData(netInfo, namespace1.Name, ports) + expectedData = append(expectedData, getExpectedPodsAndSwitches(bnc.NetInfo, tPods, []string{nodeName})...) + Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedData...)) + asf.ExpectAddressSetWithAddresses(namespace1.Name, tPodIPs) + return nil + } + + err := app.Run([]string{app.Name}) + Expect(err).NotTo(HaveOccurred()) + }, + Entry("IPv4", true, false, nil), + Entry("IPv6", false, true, nil), + Entry("[Network Segmentation] IPv4", true, false, nadFromIPMode(true, false)), + Entry("[Network Segmentation] IPv6", false, true, nadFromIPMode(false, true)), + ) + + DescribeTable("tests enabling multicast in multiple namespaces with a long name > 42 characters", func(useIPv4, useIPv6 bool, nad *nadapi.NetworkAttachmentDefinition) { + app.Action = func(ctx *cli.Context) error { + config.IPv4Mode = useIPv4 + config.IPv6Mode = useIPv6 + + netInfo := getNetInfoFromNAD(nad) + longNameSpace1Name := "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijk" // create with 63 characters + namespace1 := *newNamespace(longNameSpace1Name) + longNameSpace2Name := "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijl" // create with 63 characters + namespace2 := *newNamespace(longNameSpace2Name) + node := newNodeWithNad(nad, networkName) + + objs := []runtime.Object{ + &v1.NamespaceList{ + Items: []v1.Namespace{ + namespace1, + namespace2, + }, + }, + &v1.NodeList{ + Items: []v1.Node{ + *node, }, - ) - setIpMode(m) - - err := fakeOvn.controller.WatchNamespaces() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - fakeOvn.controller.WatchPods() - ns1, err := fakeOvn.fakeClient.KubeClient.CoreV1().Namespaces().Get(context.TODO(), namespace1.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(ns1).NotTo(gomega.BeNil()) - ns2, err := fakeOvn.fakeClient.KubeClient.CoreV1().Namespaces().Get(context.TODO(), namespace2.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(ns2).NotTo(gomega.BeNil()) - - portsns1 := []string{} - expectedData := getMulticastPolicyExpectedData(longNameSpace1Name, portsns1) - acl := expectedData[0].(*nbdb.ACL) - // Post ACL indexing work, multicast ACL's don't have names - // We use externalIDs instead; so we can check if the expected IDs exist for the long namespace so that - // isEquivalent logic will be correct - gomega.Expect(acl.Name).To(gomega.BeNil()) - gomega.Expect(acl.ExternalIDs[libovsdbops.ObjectNameKey.String()]).To(gomega.Equal(longNameSpace1Name)) - expectedData = append(expectedData, getMulticastPolicyExpectedData(longNameSpace2Name, nil)...) 
- acl = expectedData[3].(*nbdb.ACL) - gomega.Expect(acl.Name).To(gomega.BeNil()) - gomega.Expect(acl.ExternalIDs[libovsdbops.ObjectNameKey.String()]).To(gomega.Equal(longNameSpace2Name)) - expectedData = append(expectedData, getDefaultNetExpectedPodsAndSwitches([]testPod{}, []string{"node1"})...) - // Enable multicast in the namespace. - updateMulticast(fakeOvn, ns1, true) - updateMulticast(fakeOvn, ns2, true) - gomega.Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedData...)) - return nil } + if nad != nil { + objs = append(objs, &nadapi.NetworkAttachmentDefinitionList{ + Items: []nadapi.NetworkAttachmentDefinition{*nad}, + }) + } + + fakeOvn.startWithDBSetup(libovsdb.TestSetup{NBData: getNodeData(netInfo, nodeName)}, objs...) + bnc, _ := startBaseNetworkController(fakeOvn, nad) + + if nad != nil { + Expect(fakeOvn.controller.nadController.Start()).To(Succeed()) + } + + Expect(bnc.WatchNamespaces()).To(Succeed()) + Expect(bnc.WatchPods()).To(Succeed()) + ns1, err := fakeOvn.fakeClient.KubeClient.CoreV1().Namespaces().Get(context.TODO(), namespace1.Name, metav1.GetOptions{}) + Expect(err).To(Succeed()) + Expect(ns1).NotTo(BeNil()) + ns2, err := fakeOvn.fakeClient.KubeClient.CoreV1().Namespaces().Get(context.TODO(), namespace2.Name, metav1.GetOptions{}) + Expect(err).To(Succeed()) + Expect(ns2).NotTo(BeNil()) + + portsns1 := []string{} + expectedData := getMulticastPolicyExpectedData(netInfo, longNameSpace1Name, portsns1) + acl := expectedData[0].(*nbdb.ACL) + // After the ACL indexing work, multicast ACLs no longer have names. + // We use externalIDs instead, so we check that the expected IDs exist for the long namespace, + // which keeps the isEquivalent logic correct. + Expect(acl.Name).To(BeNil()) + Expect(acl.ExternalIDs[libovsdbops.ObjectNameKey.String()]).To(Equal(longNameSpace1Name)) + expectedData = append(expectedData, getMulticastPolicyExpectedData(netInfo, longNameSpace2Name, nil)...) + acl = expectedData[3].(*nbdb.ACL) + Expect(acl.Name).To(BeNil()) + Expect(acl.ExternalIDs[libovsdbops.ObjectNameKey.String()]).To(Equal(longNameSpace2Name)) + expectedData = append(expectedData, getExpectedPodsAndSwitches(bnc.NetInfo, []testPod{}, []string{node.Name})...) + // Enable multicast in the namespace. 
+ updateMulticast(fakeOvn, ns1, true) + updateMulticast(fakeOvn, ns2, true) + Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedData...)) - err := app.Run([]string{app.Name}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - - ginkgo.It("tests adding a pod to a multicast enabled namespace "+ipModeStr(m), func() { - app.Action = func(ctx *cli.Context) error { - namespace1 := *newNamespace(namespaceName1) - _, tPods, tPodIPs := createTestPods(nodeName, namespaceName1, m) - - ports := []string{} - for _, pod := range tPods { - ports = append(ports, pod.portUUID) - } - - fakeOvn.startWithDBSetup(libovsdb.TestSetup{NBData: getNodeSwitch(nodeName)}, - &v1.NamespaceList{ - Items: []v1.Namespace{ - namespace1, - }, + return nil + } + + err := app.Run([]string{app.Name}) + Expect(err).NotTo(HaveOccurred()) + }, + Entry("IPv4", true, false, nil), + Entry("IPv6", false, true, nil), + Entry("[Network Segmentation] IPv4", true, false, nadFromIPMode(true, false)), + Entry("[Network Segmentation] IPv6", false, true, nadFromIPMode(false, true)), + ) + + DescribeTable("tests adding a pod to a multicast enabled namespace", func(useIPv4, useIPv6 bool, nad *nadapi.NetworkAttachmentDefinition) { + app.Action = func(ctx *cli.Context) error { + config.IPv4Mode = useIPv4 + config.IPv6Mode = useIPv6 + + netInfo := getNetInfoFromNAD(nad) + namespace1 := *newNamespace(namespaceName1) + node := newNodeWithNad(nad, networkName) + _, tPods, tPodIPs := createTestPods(nodeName, namespaceName1, useIPv4, useIPv6) + + ports := []string{} + for _, pod := range tPods { + ports = append(ports, pod.portUUID) + } + + objs := []runtime.Object{ + &v1.NamespaceList{ + Items: []v1.Namespace{ + namespace1, }, - &v1.NodeList{ - Items: []v1.Node{ - *newNode("node1", "192.168.126.202/24"), - }, + }, + &v1.NodeList{ + Items: []v1.Node{ + *node, }, - ) - setIpMode(m) - - err := fakeOvn.controller.WatchNamespaces() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = fakeOvn.controller.WatchPods() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ns, err := fakeOvn.fakeClient.KubeClient.CoreV1().Namespaces().Get(context.TODO(), namespace1.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(ns).NotTo(gomega.BeNil()) - - // Enable multicast in the namespace. - updateMulticast(fakeOvn, ns, true) - // Check expected data without pods - expectedDataWithoutPods := getMulticastPolicyExpectedData(namespace1.Name, nil) - expectedDataWithoutPods = append(expectedDataWithoutPods, getNodeSwitch(nodeName)...) - gomega.Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedDataWithoutPods)) - - // Create pods - for _, tPod := range tPods { - tPod.populateLogicalSwitchCache(fakeOvn) - _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(tPod.namespace).Create(context.TODO(), newPod( - tPod.namespace, tPod.podName, tPod.nodeName, tPod.podIP), metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - // Check pods were added - fakeOvn.asf.EventuallyExpectAddressSetWithAddresses(namespace1.Name, tPodIPs) - expectedDataWithPods := getMulticastPolicyExpectedData(namespace1.Name, ports) - expectedDataWithPods = append(expectedDataWithPods, getDefaultNetExpectedPodsAndSwitches(tPods, []string{nodeName})...) - gomega.Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedDataWithPods...)) - - // Delete the pod from the namespace. 
- for _, tPod := range tPods { - err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(tPod.namespace).Delete(context.TODO(), - tPod.podName, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - fakeOvn.asf.EventuallyExpectEmptyAddressSetExist(namespace1.Name) - gomega.Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedDataWithoutPods)) - - return nil + }, + } + if nad != nil { + objs = append(objs, &nadapi.NetworkAttachmentDefinitionList{ + Items: []nadapi.NetworkAttachmentDefinition{*nad}, + }) } - err := app.Run([]string{app.Name}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - } + fakeOvn.startWithDBSetup(libovsdb.TestSetup{NBData: getNodeData(netInfo, nodeName)}, objs...) + bnc, asf := startBaseNetworkController(fakeOvn, nad) + + for _, tPod := range tPods { + tPod.populateControllerLogicalSwitchCache(bnc) + } + if nad != nil { + Expect(fakeOvn.controller.nadController.Start()).To(Succeed()) + } + + Expect(bnc.WatchNamespaces()).To(Succeed()) + Expect(bnc.WatchPods()).To(Succeed()) + ns, err := fakeOvn.fakeClient.KubeClient.CoreV1().Namespaces().Get(context.TODO(), namespace1.Name, metav1.GetOptions{}) + Expect(err).To(Succeed()) + Expect(ns).NotTo(BeNil()) + + // Enable multicast in the namespace. + updateMulticast(fakeOvn, ns, true) + // Check expected data without pods + expectedDataWithoutPods := getMulticastPolicyExpectedData(netInfo, namespace1.Name, nil) + expectedDataWithoutPods = append(expectedDataWithoutPods, getNodeData(netInfo, nodeName)...) + Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedDataWithoutPods)) + + // Create pods + for _, tPod := range tPods { + tPod.populateControllerLogicalSwitchCache(bnc) + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(tPod.namespace).Create(context.TODO(), newPod( + tPod.namespace, tPod.podName, tPod.nodeName, tPod.podIP), metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + } + + // Check pods were added + asf.EventuallyExpectAddressSetWithAddresses(namespace1.Name, tPodIPs) + expectedDataWithPods := getMulticastPolicyExpectedData(netInfo, namespace1.Name, ports) + expectedDataWithPods = append(expectedDataWithPods, getExpectedPodsAndSwitches(bnc, tPods, []string{nodeName})...) + Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedDataWithPods...)) + + // Delete the pod from the namespace. 
+ for _, tPod := range tPods { + err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(tPod.namespace).Delete(context.TODO(), + tPod.podName, *metav1.NewDeleteOptions(0)) + Expect(err).NotTo(HaveOccurred()) + } + asf.EventuallyExpectEmptyAddressSetExist(namespace1.Name) + Eventually(fakeOvn.nbClient).Should(libovsdb.HaveData(expectedDataWithoutPods)) + + return nil + } + + err := app.Run([]string{app.Name}) + Expect(err).NotTo(HaveOccurred()) + }, + Entry("IPv4", true, false, nil), + Entry("IPv6", false, true, nil), + Entry("[Network Segmentation] IPv4", true, false, nadFromIPMode(true, false)), + Entry("[Network Segmentation] IPv6", false, true, nadFromIPMode(false, true)), + ) }) }) diff --git a/go-controller/pkg/ovn/multihoming_test.go b/go-controller/pkg/ovn/multihoming_test.go index ea465379ba..b2adfe441c 100644 --- a/go-controller/pkg/ovn/multihoming_test.go +++ b/go-controller/pkg/ovn/multihoming_test.go @@ -8,7 +8,7 @@ import ( v1 "k8s.io/api/core/v1" - iputils "github.com/containernetworking/plugins/pkg/ip" + kubevirtv1 "kubevirt.io/api/core/v1" nadapi "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" @@ -19,6 +19,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" + ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -85,6 +86,7 @@ type secondaryNetworkExpectationMachine struct { pods []testPod gatewayConfig *util.L3GatewayConfig isInterconnectCluster bool + hasClusterPortGroup bool } func newSecondaryNetworkExpectationMachine(fakeOvn *FakeOVN, pods []testPod, opts ...option) *secondaryNetworkExpectationMachine { @@ -111,25 +113,36 @@ func withInterconnectCluster() option { } } -func (em *secondaryNetworkExpectationMachine) expectedLogicalSwitchesAndPorts() []libovsdbtest.TestData { +func withClusterPortGroup() option { + return func(machine *secondaryNetworkExpectationMachine) { + machine.hasClusterPortGroup = true + } +} + +func (em *secondaryNetworkExpectationMachine) expectedLogicalSwitchesAndPorts(isPrimary bool) []libovsdbtest.TestData { + return em.expectedLogicalSwitchesAndPortsWithLspEnabled(isPrimary, nil) +} + +func (em *secondaryNetworkExpectationMachine) expectedLogicalSwitchesAndPortsWithLspEnabled(isPrimary bool, expectedPodLspEnabled map[string]*bool) []libovsdbtest.TestData { data := []libovsdbtest.TestData{} for _, ocInfo := range em.fakeOvn.secondaryControllers { nodeslsps := make(map[string][]string) acls := make(map[string][]string) var switchName string + switchNodeMap := make(map[string]*nbdb.LogicalSwitch) + alreadyAddedManagementElements := make(map[string]struct{}) for _, pod := range em.pods { podInfo, ok := pod.secondaryPodInfos[ocInfo.bnc.GetNetworkName()] if !ok { continue } - - subnets := ocInfo.bnc.Subnets() + subnets := podInfo.nodeSubnet var ( - subnet config.CIDRNetworkEntry + subnet *net.IPNet hasSubnets bool ) if len(subnets) > 0 { - subnet = subnets[0] + subnet = ovntest.MustParseIPNet(subnets) hasSubnets = true } @@ -143,6 +156,9 @@ func (em *secondaryNetworkExpectationMachine) expectedLogicalSwitchesAndPorts() } podAddr := fmt.Sprintf("%s %s", portInfo.podMAC, portInfo.podIP) lsp := newExpectedSwitchPort(lspUUID, portName, 
podAddr, pod, ocInfo.bnc, nad) + if expectedPodLspEnabled != nil { + lsp.Enabled = expectedPodLspEnabled[pod.podName] + } if pod.noIfaceIdVer { delete(lsp.Options, "iface-id-ver") @@ -151,20 +167,22 @@ func (em *secondaryNetworkExpectationMachine) expectedLogicalSwitchesAndPorts() lsp.Options["requested-tnl-key"] = "1" // hardcode this for now. } data = append(data, lsp) + switch ocInfo.bnc.TopologyType() { case ovntypes.Layer3Topology: switchName = ocInfo.bnc.GetNetworkScopedName(pod.nodeName) - managementIP := managementPortIP(subnet.CIDR) + managementIP := managementPortIP(subnet) switchToRouterPortName := "stor-" + switchName switchToRouterPortUUID := switchToRouterPortName + "-UUID" data = append(data, newExpectedSwitchToRouterPort(switchToRouterPortUUID, switchToRouterPortName, pod, ocInfo.bnc, nad)) nodeslsps[switchName] = append(nodeslsps[switchName], switchToRouterPortUUID) - if em.gatewayConfig != nil { + if _, alreadyAdded := alreadyAddedManagementElements[pod.nodeName]; !alreadyAdded && + em.gatewayConfig != nil { mgmtPortName := managementPortName(switchName) mgmtPortUUID := mgmtPortName + "-UUID" - mgmtPort := expectedManagementPort(mgmtPortName, managementIP.String()) + mgmtPort := expectedManagementPort(mgmtPortName, managementIP) data = append(data, mgmtPort) nodeslsps[switchName] = append(nodeslsps[switchName], mgmtPortUUID) const aclUUID = "acl1-UUID" @@ -176,26 +194,20 @@ func (em *secondaryNetworkExpectationMachine) expectedLogicalSwitchesAndPorts() } case ovntypes.Layer2Topology: switchName = ocInfo.bnc.GetNetworkScopedName(ovntypes.OVNLayer2Switch) - managementIP := managementPortIP(subnet.CIDR) + managementIP := managementPortIP(subnet) - if em.gatewayConfig != nil { + if _, alreadyAdded := alreadyAddedManagementElements[pod.nodeName]; !alreadyAdded && + em.gatewayConfig != nil { // there are multiple mgmt ports in the cluster, thus the ports must be scoped with the node name mgmtPortName := managementPortName(ocInfo.bnc.GetNetworkScopedName(nodeName)) mgmtPortUUID := mgmtPortName + "-UUID" - mgmtPort := expectedManagementPort(mgmtPortName, managementIP.String()) + mgmtPort := expectedManagementPort(mgmtPortName, managementIP) data = append(data, mgmtPort) nodeslsps[switchName] = append(nodeslsps[switchName], mgmtPortUUID) - // there are multiple GRs in the cluster, thus their names must be scoped with the node name - gwRouterName := fmt.Sprintf( - "%s%s", - ovntypes.GWRouterPrefix, - ocInfo.bnc.GetNetworkScopedName(nodeName), - ) - networkSwitchToGWRouterLSPName := ovntypes.JoinSwitchToGWRouterPrefix + gwRouterName + networkSwitchToGWRouterLSPName := ovntypes.SwitchToRouterPrefix + switchName networkSwitchToGWRouterLSPUUID := networkSwitchToGWRouterLSPName + "-UUID" - - data = append(data, &nbdb.LogicalSwitchPort{ + lsp := &nbdb.LogicalSwitchPort{ UUID: networkSwitchToGWRouterLSPUUID, Name: networkSwitchToGWRouterLSPName, Addresses: []string{"router"}, @@ -203,9 +215,13 @@ func (em *secondaryNetworkExpectationMachine) expectedLogicalSwitchesAndPorts() "k8s.ovn.org/topology": ocInfo.bnc.TopologyType(), "k8s.ovn.org/network": ocInfo.bnc.GetNetworkName(), }, - Options: map[string]string{"router-port": ovntypes.GWRouterToJoinSwitchPrefix + gwRouterName}, + Options: map[string]string{"router-port": ovntypes.RouterToSwitchPrefix + switchName}, Type: "router", - }) + } + data = append(data, lsp) + if util.IsNetworkSegmentationSupportEnabled() && ocInfo.bnc.IsPrimaryNetwork() { + lsp.Options["requested-tnl-key"] = "25" + } nodeslsps[switchName] = 
append(nodeslsps[switchName], networkSwitchToGWRouterLSPUUID) const aclUUID = "acl1-UUID" @@ -222,8 +238,15 @@ func (em *secondaryNetworkExpectationMachine) expectedLogicalSwitchesAndPorts() var otherConfig map[string]string if hasSubnets { otherConfig = map[string]string{ - "exclude_ips": managementPortIP(subnet.CIDR).String(), - "subnet": subnet.CIDR.String(), + "subnet": subnet.String(), + } + if !ocInfo.bnc.IsPrimaryNetwork() { + // FIXME: It is odd that, for secondary networks that don't have + // management ports, these tests still expect the managementPortIP to be + // excluded for no apparent reason. + // FIXME2: Why are we setting exclude_ips on OVN switches when we don't + // even use OVN IPAM? + otherConfig["exclude_ips"] = managementPortIP(subnet).String() + } } @@ -234,23 +257,29 @@ otherConfig = nil } - data = append(data, &nbdb.LogicalSwitch{ - UUID: switchName + "-UUID", - Name: switchName, - Ports: nodeslsps[switchName], - ExternalIDs: map[string]string{ovntypes.NetworkExternalID: ocInfo.bnc.GetNetworkName()}, + switchNodeMap[switchName] = &nbdb.LogicalSwitch{ + UUID: switchName + "-UUID", + Name: switchName, + Ports: nodeslsps[switchName], + ExternalIDs: map[string]string{ + ovntypes.NetworkExternalID: ocInfo.bnc.GetNetworkName(), + ovntypes.NetworkRoleExternalID: util.GetUserDefinedNetworkRole(isPrimary), + }, OtherConfig: otherConfig, ACLs: acls[switchName], - }) - if em.gatewayConfig != nil { + } + + if _, alreadyAdded := alreadyAddedManagementElements[pod.nodeName]; !alreadyAdded && + em.gatewayConfig != nil { if ocInfo.bnc.TopologyType() == ovntypes.Layer3Topology { - data = append(data, expectedGWEntities(pod.nodeName, ocInfo.bnc, *em.gatewayConfig)...) - data = append(data, expectedLayer3EgressEntities(ocInfo.bnc, *em.gatewayConfig)...) + data = append(data, expectedGWEntities(pod.nodeName, subnets, ocInfo.bnc, *em.gatewayConfig)...) + data = append(data, expectedLayer3EgressEntities(ocInfo.bnc, *em.gatewayConfig, subnet)...) } else { data = append(data, expectedLayer2EgressEntities(ocInfo.bnc, *em.gatewayConfig, pod.nodeName)...) 
} } - if em.isInterconnectCluster && ocInfo.bnc.TopologyType() == ovntypes.Layer3Topology { + if _, alreadyAdded := alreadyAddedManagementElements[pod.nodeName]; !alreadyAdded && + em.isInterconnectCluster && ocInfo.bnc.TopologyType() == ovntypes.Layer3Topology { transitSwitchName := ocInfo.bnc.GetNetworkName() + "_transit_switch" data = append(data, &nbdb.LogicalSwitch{ UUID: transitSwitchName + "-UUID", @@ -264,9 +293,22 @@ func (em *secondaryNetworkExpectationMachine) expectedLogicalSwitchesAndPorts() }, }) } + if _, alreadyAdded := alreadyAddedManagementElements[pod.nodeName]; !alreadyAdded && + em.hasClusterPortGroup { + mgmtPortName := managementPortName(ocInfo.bnc.GetNetworkScopedName(nodeName)) + mgmtPortUUID := mgmtPortName + "-UUID" + + clusterPG := newNetworkClusterPortGroup(ocInfo.bnc) + clusterPG.Ports = []string{mgmtPortUUID} + data = append(data, clusterPG) + } + alreadyAddedManagementElements[pod.nodeName] = struct{}{} + } + for _, logicalSwitch := range switchNodeMap { + data = append(data, logicalSwitch) } - } + return data } @@ -295,7 +337,6 @@ func newExpectedSwitchToRouterPort(lspUUID string, portName string, pod testPod, lrp.ExternalIDs = nil lrp.Options = map[string]string{ "router-port": "rtos-isolatednet_test-node", - "arp_proxy": "0a:58:a9:fe:01:01 169.254.1.1 fe80::1 10.128.0.0/14", } lrp.PortSecurity = nil lrp.Type = "router" @@ -314,20 +355,21 @@ func managementPortName(switchName string) string { return fmt.Sprintf("k8s-%s", switchName) } -func expectedManagementPort(portName string, ip string) *nbdb.LogicalSwitchPort { +func expectedManagementPort(portName string, ip net.IP) *nbdb.LogicalSwitchPort { return &nbdb.LogicalSwitchPort{ UUID: portName + "-UUID", - Addresses: []string{fmt.Sprintf("02:03:04:05:06:07 %s", ip)}, + Addresses: []string{fmt.Sprintf("%s %s", util.IPAddrToHWAddr(ip).String(), ip.String())}, Name: portName, } } func gwRouterExternalIDs(netInfo util.NetInfo, gwConfig util.L3GatewayConfig) map[string]string { return map[string]string{ - ovntypes.NetworkExternalID: netInfo.GetNetworkName(), - ovntypes.TopologyExternalID: netInfo.TopologyType(), - "physical_ip": hostPhysicalIP(gwConfig), - "physical_ips": strings.Join(hostIPsFromGWConfig(gwConfig), ","), + ovntypes.NetworkExternalID: netInfo.GetNetworkName(), + ovntypes.NetworkRoleExternalID: getNetworkRole(netInfo), + ovntypes.TopologyExternalID: netInfo.TopologyType(), + "physical_ip": hostPhysicalIP(gwConfig), + "physical_ips": strings.Join(hostIPsFromGWConfig(gwConfig), ","), } } @@ -341,7 +383,7 @@ func hostPhysicalIP(gwConfig util.L3GatewayConfig) string { func hostIPsFromGWConfig(gwConfig util.L3GatewayConfig) []string { var hostIPs []string - for _, ip := range append(gwConfig.IPAddresses, dummyJoinIP()) { + for _, ip := range append(gwConfig.IPAddresses, dummyMasqueradeIP()) { hostIPs = append(hostIPs, ip.IP.String()) } return hostIPs @@ -381,19 +423,40 @@ func enableICFeatureConfig() *config.OVNKubernetesFeatureConfig { return featConfig } -func icClusterTestConfiguration() testConfiguration { - return testConfiguration{ +type testConfigOpt = func(*testConfiguration) + +func icClusterTestConfiguration(opts ...testConfigOpt) testConfiguration { + config := testConfiguration{ configToOverride: enableICFeatureConfig(), expectationOptions: []option{withInterconnectCluster()}, } + for _, opt := range opts { + opt(&config) + } + return config +} + +func nonICClusterTestConfiguration(opts ...testConfigOpt) testConfiguration { + config := testConfiguration{} + for _, opt := range opts { + 
opt(&config) + } + return config } -func nonICClusterTestConfiguration() testConfiguration { - return testConfiguration{} +func newMultiHomedKubevirtPod(vmName string, liveMigrationInfo liveMigrationPodInfo, testPod testPod, multiHomingConfigs ...secondaryNetInfo) *v1.Pod { + pod := newMultiHomedPod(testPod, multiHomingConfigs...) + pod.Labels[kubevirtv1.VirtualMachineNameLabel] = vmName + pod.Status.Phase = liveMigrationInfo.podPhase + for key, val := range liveMigrationInfo.annotation { + pod.Annotations[key] = val + } + pod.CreationTimestamp = liveMigrationInfo.creationTimestamp + return pod } -func newMultiHomedPod(namespace, name, node, podIP string, multiHomingConfigs ...secondaryNetInfo) *v1.Pod { - pod := newPod(namespace, name, node, podIP) +func newMultiHomedPod(testPod testPod, multiHomingConfigs ...secondaryNetInfo) *v1.Pod { + pod := newPod(testPod.namespace, testPod.podName, testPod.nodeName, testPod.podIP) var secondaryNetworks []nadapi.NetworkSelectionElement for _, multiHomingConf := range multiHomingConfigs { if multiHomingConf.isPrimary { @@ -415,7 +478,7 @@ func newMultiHomedPod(namespace, name, node, podIP string, multiHomingConfigs .. serializedNetworkSelectionElements, _ := json.Marshal(secondaryNetworks) pod.Annotations = map[string]string{nadapi.NetworkAttachmentAnnot: string(serializedNetworkSelectionElements)} if config.OVNKubernetesFeature.EnableInterconnect { - dummyOVNNetAnnotations := dummyOVNPodNetworkAnnotations(multiHomingConfigs) + dummyOVNNetAnnotations := dummyOVNPodNetworkAnnotations(testPod.secondaryPodInfos, multiHomingConfigs) if dummyOVNNetAnnotations != "{}" { pod.Annotations["k8s.ovn.org/pod-networks"] = dummyOVNNetAnnotations } @@ -423,7 +486,7 @@ func newMultiHomedPod(namespace, name, node, podIP string, multiHomingConfigs .. return pod } -func dummyOVNPodNetworkAnnotations(multiHomingConfigs []secondaryNetInfo) string { +func dummyOVNPodNetworkAnnotations(secondaryPodInfos map[string]*secondaryPodInfo, multiHomingConfigs []secondaryNetInfo) string { var ovnPodNetworksAnnotations []byte podAnnotations := map[string]podAnnotation{} for i, netConfig := range multiHomingConfigs { @@ -431,7 +494,8 @@ func dummyOVNPodNetworkAnnotations(multiHomingConfigs []secondaryNetInfo) string // for layer2 topology since allocating the annotation for this cluster configuration // is performed by cluster manager - which doesn't exist in the unit tests. 
if netConfig.topology == ovntypes.Layer2Topology { - podAnnotations[netConfig.nadName] = dummyOVNPodNetworkAnnotationForNetwork(netConfig, i+1) + portInfo := secondaryPodInfos[netConfig.netName].allportInfo[netConfig.nadName] + podAnnotations[netConfig.nadName] = dummyOVNPodNetworkAnnotationForNetwork(portInfo, netConfig, i+1) } } @@ -443,23 +507,25 @@ func dummyOVNPodNetworkAnnotations(multiHomingConfigs []secondaryNetInfo) string return string(ovnPodNetworksAnnotations) } -func dummyOVNPodNetworkAnnotationForNetwork(netConfig secondaryNetInfo, tunnelID int) podAnnotation { +func dummyOVNPodNetworkAnnotationForNetwork(portInfo portInfo, netConfig secondaryNetInfo, tunnelID int) podAnnotation { role := ovntypes.NetworkRoleSecondary if netConfig.isPrimary { role = ovntypes.NetworkRolePrimary } - var ( - gateways []string - ips []string - ) - for _, subnetStr := range strings.Split(netConfig.subnets, ",") { + var gateways []string + for _, subnetStr := range strings.Split(netConfig.clustersubnets, ",") { subnet := testing.MustParseIPNet(subnetStr) - ips = append(ips, GetWorkloadSecondaryNetworkDummyIP(subnet).String()) gateways = append(gateways, util.GetNodeGatewayIfAddr(subnet).IP.String()) } + ip := testing.MustParseIP(portInfo.podIP) + _, maskSize := util.GetIPFullMask(ip).Size() + ipNet := net.IPNet{ + IP: ip, + Mask: net.CIDRMask(portInfo.prefixLen, maskSize), + } return podAnnotation{ - IPs: ips, - MAC: util.IPAddrToHWAddr(testing.MustParseIPNet(ips[0]).IP).String(), + IPs: []string{ipNet.String()}, + MAC: util.IPAddrToHWAddr(ip).String(), Gateways: gateways, Routes: nil, // TODO: must add here the expected routes. TunnelID: tunnelID, @@ -467,16 +533,6 @@ func dummyOVNPodNetworkAnnotationForNetwork(netConfig secondaryNetInfo, tunnelID } } -// GetWorkloadSecondaryNetworkDummyIP returns the workload logical switch port -// address (the ".3" address), return nil if the subnet is invalid -func GetWorkloadSecondaryNetworkDummyIP(subnet *net.IPNet) *net.IPNet { - mgmtIfAddr := util.GetNodeManagementIfAddr(subnet) - if mgmtIfAddr == nil { - return nil - } - return &net.IPNet{IP: iputils.NextIP(mgmtIfAddr.IP), Mask: subnet.Mask} -} - // Internal struct used to marshal PodAnnotation to the pod annotation // Copied from pkg/util/pod_annotation.go type podAnnotation struct { diff --git a/go-controller/pkg/ovn/multipolicy_test.go b/go-controller/pkg/ovn/multipolicy_test.go index 01ffa21451..899dd34c19 100644 --- a/go-controller/pkg/ovn/multipolicy_test.go +++ b/go-controller/pkg/ovn/multipolicy_test.go @@ -9,8 +9,8 @@ import ( cnitypes "github.com/containernetworking/cni/pkg/types" mnpapi "github.com/k8snetworkplumbingwg/multi-networkpolicy/pkg/apis/k8s.cni.cncf.io/v1beta1" nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" - "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/extensions/table" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" "github.com/onsi/gomega/format" "github.com/urfave/cli/v2" @@ -120,7 +120,7 @@ func (p testPod) populateSecondaryNetworkLogicalSwitchCache(fakeOvn *FakeOVN, oc gomega.Expect(err).NotTo(gomega.HaveOccurred()) } -func getExpectedDataPodsAndSwitchesForSecondaryNetwork(fakeOvn *FakeOVN, pods []testPod) []libovsdb.TestData { +func getExpectedDataPodsAndSwitchesForSecondaryNetwork(fakeOvn *FakeOVN, pods []testPod, netInfo util.NetInfo) []libovsdb.TestData { data := []libovsdb.TestData{} for _, ocInfo := range fakeOvn.secondaryControllers { nodeslsps := make(map[string][]string) @@ -172,10 +172,13 @@ 
func getExpectedDataPodsAndSwitchesForSecondaryNetwork(fakeOvn *FakeOVN, pods [] nodeslsps[switchName] = append(nodeslsps[switchName], lspUUID) } data = append(data, &nbdb.LogicalSwitch{ - UUID: switchName + "-UUID", - Name: switchName, - Ports: nodeslsps[switchName], - ExternalIDs: map[string]string{ovntypes.NetworkExternalID: ocInfo.bnc.GetNetworkName()}, + UUID: switchName + "-UUID", + Name: switchName, + Ports: nodeslsps[switchName], + ExternalIDs: map[string]string{ + ovntypes.NetworkExternalID: ocInfo.bnc.GetNetworkName(), + ovntypes.NetworkRoleExternalID: getNetworkRole(netInfo), + }, }) } } @@ -264,15 +267,21 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { switch topology { case ovntypes.Layer2Topology: initialDB.NBData = append(initialDB.NBData, &nbdb.LogicalSwitch{ - Name: netInfo.GetNetworkScopedName(ovntypes.OVNLayer2Switch), - UUID: netInfo.GetNetworkScopedName(ovntypes.OVNLayer2Switch) + "_UUID", - ExternalIDs: map[string]string{ovntypes.NetworkExternalID: secondaryNetworkName}, + Name: netInfo.GetNetworkScopedName(ovntypes.OVNLayer2Switch), + UUID: netInfo.GetNetworkScopedName(ovntypes.OVNLayer2Switch) + "_UUID", + ExternalIDs: map[string]string{ + ovntypes.NetworkExternalID: secondaryNetworkName, + ovntypes.NetworkRoleExternalID: getNetworkRole(netInfo), + }, }) case ovntypes.LocalnetTopology: initialDB.NBData = append(initialDB.NBData, &nbdb.LogicalSwitch{ - Name: netInfo.GetNetworkScopedName(ovntypes.OVNLocalnetSwitch), - UUID: netInfo.GetNetworkScopedName(ovntypes.OVNLocalnetSwitch) + "_UUID", - ExternalIDs: map[string]string{ovntypes.NetworkExternalID: secondaryNetworkName}, + Name: netInfo.GetNetworkScopedName(ovntypes.OVNLocalnetSwitch), + UUID: netInfo.GetNetworkScopedName(ovntypes.OVNLocalnetSwitch) + "_UUID", + ExternalIDs: map[string]string{ + ovntypes.NetworkExternalID: secondaryNetworkName, + ovntypes.NetworkRoleExternalID: getNetworkRole(netInfo), + }, }) } } @@ -383,7 +392,7 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { getUpdatedInitialDB := func(tPods []testPod) []libovsdb.TestData { updatedSwitchAndPods := getDefaultNetExpectedPodsAndSwitches(tPods, []string{nodeName}) - secondarySwitchAndPods := getExpectedDataPodsAndSwitchesForSecondaryNetwork(fakeOvn, tPods) + secondarySwitchAndPods := getExpectedDataPodsAndSwitchesForSecondaryNetwork(fakeOvn, tPods, netInfo) if len(secondarySwitchAndPods) != 0 { updatedSwitchAndPods = append(updatedSwitchAndPods, secondarySwitchAndPods...) 
} @@ -523,7 +532,7 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) - table.DescribeTable("correctly adds and deletes pod IPs from secondary network namespace address set", + ginkgo.DescribeTable("correctly adds and deletes pod IPs from secondary network namespace address set", func(topology string, remote bool) { app.Action = func(ctx *cli.Context) error { var err error @@ -535,7 +544,7 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { nodeSubnet = "10.1.1.0/24" } - setSecondaryNetworkTestData(topology, subnets) + setSecondaryNetworkTestData(topology, subnets) // the network role external ID is set here for the layer2 topology watchNodes := true node := *newNode(nodeName, "192.168.126.202/24") @@ -614,12 +623,12 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }, - table.Entry("on local zone for layer3 topology", ovntypes.Layer3Topology, false), - table.Entry("on remote zone for layer3 topology", ovntypes.Layer3Topology, true), - table.Entry("on local zone for layer2 topology", ovntypes.Layer2Topology, false), - table.Entry("on remote zone for layer2 topology", ovntypes.Layer2Topology, true), - table.Entry("on local zone for localnet topology", ovntypes.LocalnetTopology, false), - table.Entry("on remote zone for localnet topology", ovntypes.LocalnetTopology, true), + ginkgo.Entry("on local zone for layer3 topology", ovntypes.Layer3Topology, false), + ginkgo.Entry("on remote zone for layer3 topology", ovntypes.Layer3Topology, true), + ginkgo.Entry("on local zone for layer2 topology", ovntypes.Layer2Topology, false), + ginkgo.Entry("on remote zone for layer2 topology", ovntypes.Layer2Topology, true), + ginkgo.Entry("on local zone for localnet topology", ovntypes.LocalnetTopology, false), + ginkgo.Entry("on remote zone for localnet topology", ovntypes.LocalnetTopology, true), ) }) }) diff --git a/go-controller/pkg/ovn/namespace.go b/go-controller/pkg/ovn/namespace.go index a804492a6e..54a4408e91 100644 --- a/go-controller/pkg/ovn/namespace.go +++ b/go-controller/pkg/ovn/namespace.go @@ -7,7 +7,6 @@ import ( "github.com/ovn-org/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" - libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors" @@ -59,7 +58,6 @@ func (oc *DefaultNetworkController) getRoutingPodGWs(nsInfo *namespaceInfo) map[ // addLocalPodToNamespace returns pod's routing gateway info and the ops needed // to add pod's IP to the namespace's address set and port group. 
func (oc *DefaultNetworkController) addLocalPodToNamespace(ns string, ips []*net.IPNet, portUUID string) (*gatewayInfo, map[string]gatewayInfo, []ovsdb.Operation, error) { - var ops []ovsdb.Operation var err error nsInfo, nsUnlock, err := oc.ensureNamespaceLocked(ns, true, nil) if err != nil { @@ -68,16 +66,10 @@ func (oc *DefaultNetworkController) addLocalPodToNamespace(ns string, ips []*net defer nsUnlock() - if ops, err = nsInfo.addressSet.AddAddressesReturnOps(util.IPNetsIPToStringSlice(ips)); err != nil { + ops, err := oc.addLocalPodToNamespaceLocked(nsInfo, ips, portUUID) + if err != nil { return nil, nil, nil, err } - - if nsInfo.portGroupName != "" { - if ops, err = libovsdbops.AddPortsToPortGroupOps(oc.nbClient, ops, nsInfo.portGroupName, portUUID); err != nil { - return nil, nil, nil, err - } - } - return oc.getRoutingExternalGWs(nsInfo), oc.getRoutingPodGWs(nsInfo), ops, nil } diff --git a/go-controller/pkg/ovn/namespace_test.go b/go-controller/pkg/ovn/namespace_test.go index bc434c22ae..31e380159b 100644 --- a/go-controller/pkg/ovn/namespace_test.go +++ b/go-controller/pkg/ovn/namespace_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/kubernetes" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) @@ -67,8 +67,12 @@ func newNamespace(namespace string) *v1.Namespace { } } -func getNsAddrSetHashNames(ns string) (string, string) { - return addressset.GetHashNamesForAS(getNamespaceAddrSetDbIDs(ns, DefaultNetworkControllerName)) +func getDefaultNetNsAddrSetHashNames(ns string) (string, string) { + return getNsAddrSetHashNames(DefaultNetworkControllerName, ns) +} + +func getNsAddrSetHashNames(netControllerName, ns string) (string, string) { + return addressset.GetHashNamesForAS(getNamespaceAddrSetDbIDs(ns, netControllerName)) } func buildNamespaceAddressSets(namespace string, ips []string) (*nbdb.AddressSet, *nbdb.AddressSet) { diff --git a/go-controller/pkg/ovn/network_segmentation_test.go b/go-controller/pkg/ovn/network_segmentation_test.go index b4e73c8eca..a517365e7e 100644 --- a/go-controller/pkg/ovn/network_segmentation_test.go +++ b/go-controller/pkg/ovn/network_segmentation_test.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/api/core/v1" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/urfave/cli/v2" ) diff --git a/go-controller/pkg/ovn/ovn.go b/go-controller/pkg/ovn/ovn.go index 7ccb59721b..04fbbebcda 100644 --- a/go-controller/pkg/ovn/ovn.go +++ b/go-controller/pkg/ovn/ovn.go @@ -176,8 +176,27 @@ func (oc *DefaultNetworkController) ensureLocalZonePod(oldPod, pod *kapi.Pod, ad } } + // update open ports for UDN pods on pod update. 
+ if util.IsNetworkSegmentationSupportEnabled() && !util.PodWantsHostNetwork(pod) && !addPort && + pod != nil && oldPod != nil && + pod.Annotations[util.UDNOpenPortsAnnotationName] != oldPod.Annotations[util.UDNOpenPortsAnnotationName] { + networkRole, err := oc.GetNetworkRole(pod) + if err != nil { + return err + } + if networkRole != ovntypes.NetworkRolePrimary { + // only update for non-default network pods + portName := oc.GetLogicalPortName(pod, oc.GetNetworkName()) + err := oc.setUDNPodOpenPorts(pod.Namespace+"/"+pod.Name, pod.Annotations, portName) + if err != nil { + return fmt.Errorf("failed to update UDN pod %s/%s open ports: %w", pod.Namespace, pod.Name, err) + } + } + } + if kubevirt.IsPodLiveMigratable(pod) { - return kubevirt.EnsureLocalZonePodAddressesToNodeRoute(oc.watchFactory, oc.nbClient, oc.lsManager, pod, ovntypes.DefaultNetworkName) + v4Subnets, v6Subnets := util.GetClusterSubnetsWithHostPrefix() + return kubevirt.EnsureLocalZonePodAddressesToNodeRoute(oc.watchFactory, oc.nbClient, oc.lsManager, pod, ovntypes.DefaultNetworkName, append(v4Subnets, v6Subnets...)) } return nil @@ -406,9 +425,15 @@ func macAddressChanged(oldNode, node *kapi.Node, netName string) bool { return !bytes.Equal(oldMacAddress, macAddress) } -func nodeSubnetChanged(oldNode, node *kapi.Node) bool { - oldSubnets, _ := util.ParseNodeHostSubnetAnnotation(oldNode, ovntypes.DefaultNetworkName) - newSubnets, _ := util.ParseNodeHostSubnetAnnotation(node, ovntypes.DefaultNetworkName) +func nodeSubnetChanged(oldNode, node *kapi.Node, netName string) bool { + oldSubnets, _ := util.ParseNodeHostSubnetAnnotation(oldNode, netName) + newSubnets, _ := util.ParseNodeHostSubnetAnnotation(node, netName) + return !reflect.DeepEqual(oldSubnets, newSubnets) +} + +func joinCIDRChanged(oldNode, node *kapi.Node, netName string) bool { + oldSubnets, _ := util.ParseNodeGatewayRouterJoinNetwork(oldNode, netName) + newSubnets, _ := util.ParseNodeGatewayRouterJoinNetwork(node, netName) return !reflect.DeepEqual(oldSubnets, newSubnets) } @@ -444,7 +469,6 @@ func shouldUpdateNode(node, oldNode *kapi.Node) (bool, error) { } func (oc *DefaultNetworkController) StartServiceController(wg *sync.WaitGroup, runRepair bool) error { - klog.Infof("Starting OVN Service Controller: Using Endpoint Slices") wg.Add(1) go func() { defer wg.Done() @@ -462,13 +486,19 @@ func (oc *DefaultNetworkController) StartServiceController(wg *sync.WaitGroup, r func (oc *DefaultNetworkController) InitEgressServiceZoneController() (*egresssvc_zone.Controller, error) { // If the EgressIP controller is enabled it will take care of creating the // "no reroute" policies - we can pass "noop" functions to the egress service controller. 
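The comment above describes injecting behavior through function values: when the EgressIP controller already owns the "no reroute" policies, the egress service controller receives no-op stand-ins, as the following hunk shows. A condensed sketch of that pattern with hypothetical names:

import (
	libovsdbclient "github.com/ovn-org/libovsdb/client"
)

// policySetupFn is the dependency the consuming controller invokes.
type policySetupFn func(nbClient libovsdbclient.Client) error

// pickPolicySetup returns a no-op when another controller owns the NB
// entities, so the caller can invoke the dependency unconditionally.
func pickPolicySetup(egressIPEnabled bool, real policySetupFn) policySetupFn {
	if egressIPEnabled {
		// the EgressIP controller creates these policies; do nothing here
		return func(libovsdbclient.Client) error { return nil }
	}
	return real
}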
- initClusterEgressPolicies := func(libovsdbclient.Client, addressset.AddressSetFactory, string, string) error { return nil } - ensureNodeNoReroutePolicies := func(libovsdbclient.Client, addressset.AddressSetFactory, string, string, listers.NodeLister) error { + initClusterEgressPolicies := func(nbClient libovsdbclient.Client, addressSetFactory addressset.AddressSetFactory, ni util.NetInfo, + clusterSubnets []*net.IPNet, controllerName string) error { + return nil + } + ensureNodeNoReroutePolicies := func(nbClient libovsdbclient.Client, addressSetFactory addressset.AddressSetFactory, + network, router, controller string, nodeLister listers.NodeLister, v4, v6 bool) error { return nil } deleteLegacyDefaultNoRerouteNodePolicies := func(libovsdbclient.Client, string, string) error { return nil } // used only when IC=true - createDefaultNodeRouteToExternal := func(libovsdbclient.Client, string, string) error { return nil } + createDefaultNodeRouteToExternal := func(nbClient libovsdbclient.Client, clusterRouter, gwRouterName string, clusterSubnets []config.CIDRNetworkEntry) error { + return nil + } if !config.OVNKubernetesFeature.EnableEgressIP { initClusterEgressPolicies = InitClusterEgressPolicies @@ -500,6 +530,7 @@ func (oc *DefaultNetworkController) newANPController() error { oc.isPodScheduledinLocalZone, oc.zone, oc.recorder, + oc.observManager, ) return err } diff --git a/go-controller/pkg/ovn/ovn_suite_test.go b/go-controller/pkg/ovn/ovn_suite_test.go index dd1778af14..9ba756a2d0 100644 --- a/go-controller/pkg/ovn/ovn_suite_test.go +++ b/go-controller/pkg/ovn/ovn_suite_test.go @@ -3,7 +3,7 @@ package ovn import ( "testing" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/ovn/ovn_test.go b/go-controller/pkg/ovn/ovn_test.go index 44f1f7f5da..1ad919d950 100644 --- a/go-controller/pkg/ovn/ovn_test.go +++ b/go-controller/pkg/ovn/ovn_test.go @@ -6,7 +6,7 @@ import ( "fmt" "sync" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" fakeipamclaimclient "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake" mnpapi "github.com/k8snetworkplumbingwg/multi-networkpolicy/pkg/apis/k8s.cni.cncf.io/v1beta1" @@ -17,6 +17,7 @@ import ( ocpnetworkapiv1alpha1 "github.com/openshift/api/network/v1alpha1" ocpnetworkfake "github.com/openshift/client-go/network/clientset/versioned/fake" libovsdbclient "github.com/ovn-org/libovsdb/client" + ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" adminpolicybasedrouteapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1" @@ -31,6 +32,16 @@ import ( egressservice "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1" egressservicefake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/fake" udnclientfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/fake" + nad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" + fakenad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/nad" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/record" + anpapi "sigs.k8s.io/network-policy-api/apis/v1alpha1" + anpfake 
"sigs.k8s.io/network-policy-api/pkg/client/clientset/versioned/fake" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" @@ -42,13 +53,6 @@ import ( libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" util "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/tools/record" - anpapi "sigs.k8s.io/network-policy-api/apis/v1alpha1" - anpfake "sigs.k8s.io/network-policy-api/pkg/client/clientset/versioned/fake" ) const ( @@ -70,21 +74,23 @@ type secondaryControllerInfo struct { } type FakeOVN struct { - fakeClient *util.OVNMasterClientset - watcher *factory.WatchFactory - controller *DefaultNetworkController - stopChan chan struct{} - wg *sync.WaitGroup - asf *addressset.FakeAddressSetFactory - fakeRecorder *record.FakeRecorder - nbClient libovsdbclient.Client - sbClient libovsdbclient.Client - dbSetup libovsdbtest.TestSetup - nbsbCleanup *libovsdbtest.Context - egressQoSWg *sync.WaitGroup - egressSVCWg *sync.WaitGroup - anpWg *sync.WaitGroup - + fakeClient *util.OVNMasterClientset + watcher *factory.WatchFactory + controller *DefaultNetworkController + stopChan chan struct{} + wg *sync.WaitGroup + asf *addressset.FakeAddressSetFactory + fakeRecorder *record.FakeRecorder + nbClient libovsdbclient.Client + sbClient libovsdbclient.Client + dbSetup libovsdbtest.TestSetup + nbsbCleanup *libovsdbtest.Context + egressQoSWg *sync.WaitGroup + egressSVCWg *sync.WaitGroup + anpWg *sync.WaitGroup + nadController *nad.NetAttachDefinitionController + eIPController *EgressIPController + portCache *PortCache // information map of all secondary network controllers secondaryControllers map[string]secondaryControllerInfo } @@ -200,26 +206,38 @@ func (o *FakeOVN) init(nadList []nettypes.NetworkAttachmentDefinition) { o.nbClient, o.sbClient, o.nbsbCleanup, err = libovsdbtest.NewNBSBTestHarness(o.dbSetup) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + o.portCache = NewPortCache(o.stopChan) + kubeOVN := &kube.KubeOVN{ + Kube: kube.Kube{KClient: o.fakeClient.KubeClient}, + EIPClient: o.fakeClient.EgressIPClient, + } + o.eIPController = NewEIPController(o.nbClient, kubeOVN, o.watcher, + o.fakeRecorder, o.portCache, o.nadController, o.asf, config.IPv4Mode, config.IPv6Mode, "", DefaultNetworkControllerName) + if o.asf == nil { + o.eIPController.addressSetFactory = addressset.NewOvnAddressSetFactory(o.nbClient, config.IPv4Mode, config.IPv6Mode) + } o.stopChan = make(chan struct{}) o.wg = &sync.WaitGroup{} o.controller, err = NewOvnController(o.fakeClient, o.watcher, o.stopChan, o.asf, o.nbClient, o.sbClient, - o.fakeRecorder, o.wg) + o.fakeRecorder, o.wg, + o.eIPController, o.portCache) gomega.Expect(err).NotTo(gomega.HaveOccurred()) o.controller.multicastSupport = config.EnableMulticast - + o.nadController = o.controller.nadController.(*nad.NetAttachDefinitionController) + o.eIPController.nadController = o.controller.nadController.(*nad.NetAttachDefinitionController) + o.eIPController.zone = o.controller.zone setupCOPP := false setupClusterController(o.controller, setupCOPP) + for _, n := range nadList { + err := o.NewSecondaryNetworkController(&n) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } err = o.watcher.Start() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - 
for _, nad := range nadList { - err := o.NewSecondaryNetworkController(&nad) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - existingNodes, err := o.controller.kube.GetNodes() if err == nil { for _, node := range existingNodes { @@ -231,6 +249,7 @@ func (o *FakeOVN) init(nadList []nettypes.NetworkAttachmentDefinition) { } } } + } func setupClusterController(clusterController *DefaultNetworkController, setupCOPP bool) { @@ -278,7 +297,8 @@ func resetNBClient(ctx context.Context, nbClient libovsdbclient.Client) { // infrastructure and policy func NewOvnController(ovnClient *util.OVNMasterClientset, wf *factory.WatchFactory, stopChan chan struct{}, addressSetFactory addressset.AddressSetFactory, libovsdbOvnNBClient libovsdbclient.Client, - libovsdbOvnSBClient libovsdbclient.Client, recorder record.EventRecorder, wg *sync.WaitGroup) (*DefaultNetworkController, error) { + libovsdbOvnSBClient libovsdbclient.Client, recorder record.EventRecorder, wg *sync.WaitGroup, + eIPController *EgressIPController, portCache *PortCache) (*DefaultNetworkController, error) { fakeAddr, ok := addressSetFactory.(*addressset.FakeAddressSetFactory) if addressSetFactory == nil || (ok && fakeAddr == nil) { @@ -321,7 +341,14 @@ func NewOvnController(ovnClient *util.OVNMasterClientset, wf *factory.WatchFacto return nil, err } - dnc, err := newDefaultNetworkControllerCommon(cnci, stopChan, wg, addressSetFactory) + var nadController *nad.NetAttachDefinitionController + if config.OVNKubernetesFeature.EnableMultiNetwork { + nadController, err = nad.NewNetAttachDefinitionController("test", &fakenad.FakeNetworkControllerManager{}, wf, nil) + if err != nil { + return nil, err + } + } + dnc, err := newDefaultNetworkControllerCommon(cnci, stopChan, wg, addressSetFactory, nadController, nil, portCache, eIPController) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if nbZoneFailed { @@ -435,23 +462,32 @@ func (o *FakeOVN) NewSecondaryNetworkController(netattachdef *nettypes.NetworkAt if err != nil { return err } + asf := addressset.NewFakeAddressSetFactory(getNetworkControllerName(netName)) switch topoType { case types.Layer3Topology: - l3Controller := NewSecondaryLayer3NetworkController(cnci, nInfo) - l3Controller.addressSetFactory = asf + l3Controller, err := NewSecondaryLayer3NetworkController(cnci, nInfo, o.nadController, o.eIPController, o.portCache) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if o.asf != nil { // use fake asf only when enabled + l3Controller.addressSetFactory = asf + } secondaryController = &l3Controller.BaseSecondaryNetworkController case types.Layer2Topology: - l2Controller := NewSecondaryLayer2NetworkController(cnci, nInfo) - l2Controller.addressSetFactory = asf + l2Controller, err := NewSecondaryLayer2NetworkController(cnci, nInfo, o.nadController) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if o.asf != nil { // use fake asf only when enabled + l2Controller.addressSetFactory = asf + } secondaryController = &l2Controller.BaseSecondaryNetworkController case types.LocalnetTopology: - localnetController := NewSecondaryLocalnetNetworkController(cnci, nInfo) - localnetController.addressSetFactory = asf + localnetController := NewSecondaryLocalnetNetworkController(cnci, nInfo, o.nadController) + if o.asf != nil { // use fake asf only when enabled + localnetController.addressSetFactory = asf + } secondaryController = &localnetController.BaseSecondaryNetworkController default: - return fmt.Errorf("topoloty type %s not supported", topoType) + return fmt.Errorf("topology type %s not 
supported", topoType) } ocInfo = secondaryControllerInfo{bnc: secondaryController, asf: asf} o.secondaryControllers[netName] = ocInfo @@ -481,7 +517,7 @@ func (o *FakeOVN) patchEgressIPObj(nodeName, egressIPName, egressIP, network str EgressIP: egressIP, }, } - err := o.controller.patchReplaceEgressIPStatus(egressIPName, status) + err := o.controller.eIPC.patchReplaceEgressIPStatus(egressIPName, status) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } diff --git a/go-controller/pkg/ovn/pod_selector_address_set_test.go b/go-controller/pkg/ovn/pod_selector_address_set_test.go index 0cee72719d..dd2ce8c9d4 100644 --- a/go-controller/pkg/ovn/pod_selector_address_set_test.go +++ b/go-controller/pkg/ovn/pod_selector_address_set_test.go @@ -6,8 +6,8 @@ import ( "runtime" "time" - "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/extensions/table" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" @@ -171,7 +171,7 @@ var _ = ginkgo.Describe("OVN PodSelectorAddressSet", func() { // expect namespace and peer address sets only fakeOvn.asf.ExpectNumberOfAddressSets(2) }) - table.DescribeTable("adds selected pod ips to the address set", + ginkgo.DescribeTable("adds selected pod ips to the address set", func(peer knet.NetworkPolicyPeer, staticNamespace string, addrSetIPs []string) { namespace1 := *newNamespace(namespaceName1) namespace2 := *newNamespace(namespaceName2) @@ -200,21 +200,21 @@ var _ = ginkgo.Describe("OVN PodSelectorAddressSet", func() { peerASIDs := getPodSelectorAddrSetDbIDs(peerASKey, DefaultNetworkControllerName) fakeOvn.asf.ExpectAddressSetWithAddresses(peerASIDs, addrSetIPs) }, - table.Entry("all pods from a static namespace", knet.NetworkPolicyPeer{ + ginkgo.Entry("all pods from a static namespace", knet.NetworkPolicyPeer{ PodSelector: &metav1.LabelSelector{}, NamespaceSelector: nil, }, namespaceName1, []string{ip1, ip2}), - table.Entry("selected pods from a static namespace", knet.NetworkPolicyPeer{ + ginkgo.Entry("selected pods from a static namespace", knet.NetworkPolicyPeer{ PodSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{podLabelKey: "ns1pod1"}, }, NamespaceSelector: nil, }, namespaceName1, []string{ip1}), - table.Entry("all pods from all namespaces", knet.NetworkPolicyPeer{ + ginkgo.Entry("all pods from all namespaces", knet.NetworkPolicyPeer{ PodSelector: &metav1.LabelSelector{}, NamespaceSelector: &metav1.LabelSelector{}, }, namespaceName1, []string{ip1, ip2, ip3, ip4}), - table.Entry("selected pods from all namespaces", knet.NetworkPolicyPeer{ + ginkgo.Entry("selected pods from all namespaces", knet.NetworkPolicyPeer{ PodSelector: &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ { @@ -226,7 +226,7 @@ var _ = ginkgo.Describe("OVN PodSelectorAddressSet", func() { }, NamespaceSelector: &metav1.LabelSelector{}, }, namespaceName1, []string{ip1, ip3}), - table.Entry("all pods from selected namespaces", knet.NetworkPolicyPeer{ + ginkgo.Entry("all pods from selected namespaces", knet.NetworkPolicyPeer{ PodSelector: &metav1.LabelSelector{}, NamespaceSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -234,7 +234,7 @@ var _ = ginkgo.Describe("OVN PodSelectorAddressSet", func() { }, }, }, namespaceName1, []string{ip3, ip4}), - table.Entry("selected pods from selected namespaces", knet.NetworkPolicyPeer{ + ginkgo.Entry("selected pods from selected namespaces", 
knet.NetworkPolicyPeer{ PodSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{podLabelKey: "ns2pod1"}, }, diff --git a/go-controller/pkg/ovn/pods.go b/go-controller/pkg/ovn/pods.go index e153ac3986..3f9da4bd8a 100644 --- a/go-controller/pkg/ovn/pods.go +++ b/go-controller/pkg/ovn/pods.go @@ -9,6 +9,10 @@ import ( nadapi "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" "github.com/ovn-org/libovsdb/ovsdb" + kapi "k8s.io/api/core/v1" + ktypes "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" + hotypes "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kubevirt" @@ -19,9 +23,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" - kapi "k8s.io/api/core/v1" - ktypes "k8s.io/apimachinery/pkg/types" - "k8s.io/klog/v2" + utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors" ) func (oc *DefaultNetworkController) syncPods(pods []interface{}) error { @@ -183,6 +185,15 @@ func (oc *DefaultNetworkController) deleteLogicalPort(pod *kapi.Pod, portInfo *l return err } + // delete open port ACLs for UDN pods + if util.IsNetworkSegmentationSupportEnabled() { + // safe to call for non-UDN pods + err = oc.setUDNPodOpenPorts(pod.Namespace+"/"+pod.Name, pod.Annotations, "") + if err != nil { + return fmt.Errorf("failed to cleanup UDN pod %s/%s open ports: %w", pod.Namespace, pod.Name, err) + } + } + // do not remove SNATs/GW routes/IPAM for an IP address unless we have validated no other pod is using it if pInfo == nil { return nil @@ -243,7 +254,7 @@ func (oc *DefaultNetworkController) addLogicalPort(pod *kapi.Pod) (err error) { }() nadName := ovntypes.DefaultNetworkName - ops, lsp, podAnnotation, newlyCreatedPort, err = oc.addLogicalPortToNetwork(pod, nadName, network) + ops, lsp, podAnnotation, newlyCreatedPort, err = oc.addLogicalPortToNetwork(pod, nadName, network, nil) if err != nil { return err } @@ -258,6 +269,13 @@ func (oc *DefaultNetworkController) addLogicalPort(pod *kapi.Pod) (err error) { if ops, err = libovsdbops.AddPortsToPortGroupOps(oc.nbClient, ops, pgName, lsp.UUID); err != nil { return err } + // set open ports for UDN pods, use function without transact, since lsp is not created yet. 
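The "function without transact" mentioned above follows a convention used throughout this PR: a mutating helper has an Ops sibling that only builds operations, for callers that must commit together with other pending changes (here, an LSP that has not been transacted yet). A minimal sketch under that assumption; only TransactAndCheck is taken from the repo, the other names are illustrative:

import (
	libovsdbclient "github.com/ovn-org/libovsdb/client"
	"github.com/ovn-org/libovsdb/ovsdb"
	libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops"
)

// buildOpenPortACLOps only appends to ops; nothing is written to NB yet.
func buildOpenPortACLOps(ops []ovsdb.Operation, portName string) ([]ovsdb.Operation, error) {
	// ...append ACL mutations for portName to ops here...
	return ops, nil
}

// applyOpenPortACLs is the standalone variant: build, then commit immediately.
func applyOpenPortACLs(nbClient libovsdbclient.Client, portName string) error {
	ops, err := buildOpenPortACLOps(nil, portName)
	if err != nil {
		return err
	}
	_, err = libovsdbops.TransactAndCheck(nbClient, ops)
	return err
}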
+ var parseErr error + ops, parseErr, err = oc.setUDNPodOpenPortsOps(pod.Namespace+"/"+pod.Name, pod.Annotations, lsp.Name, ops) + err = utilerrors.Join(parseErr, err) + if err != nil { + return fmt.Errorf("failed to set UDN pod %s/%s open ports: %w", pod.Namespace, pod.Name, err) + } } // Ensure the namespace/nsInfo exists @@ -296,7 +314,7 @@ func (oc *DefaultNetworkController) addLogicalPort(pod *kapi.Pod) (err error) { // namespace annotations to go through external egress router if extIPs, err := getExternalIPsGR(oc.watchFactory, pod.Spec.NodeName); err != nil { return err - } else if ops, err = addOrUpdatePodSNATOps(oc.nbClient, oc.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, podAnnotation.IPs, ops); err != nil { + } else if ops, err = addOrUpdatePodSNATOps(oc.nbClient, oc.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, podAnnotation.IPs, "", ops); err != nil { return err } } diff --git a/go-controller/pkg/ovn/pods_test.go b/go-controller/pkg/ovn/pods_test.go index 422d7e003c..33970cb9c9 100644 --- a/go-controller/pkg/ovn/pods_test.go +++ b/go-controller/pkg/ovn/pods_test.go @@ -21,12 +21,13 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" v1 "k8s.io/api/core/v1" + kapierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" utilnet "k8s.io/utils/net" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/urfave/cli/v2" ) @@ -52,6 +53,29 @@ func newPodMeta(namespace, name string, additionalLabels map[string]string) meta } } +func newPodWithLabelsAllIPFamilies(namespace, name, node string, podIPs []string, additionalLabels map[string]string) *v1.Pod { + podIPList := []v1.PodIP{} + for _, podIP := range podIPs { + podIPList = append(podIPList, v1.PodIP{IP: podIP}) + } + return &v1.Pod{ + ObjectMeta: newPodMeta(namespace, name, additionalLabels), + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "containerName", + Image: "containerImage", + }, + }, + NodeName: node, + }, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + PodIP: podIPList[0].IP, + PodIPs: podIPList, + }, + } +} func newPodWithLabels(namespace, name, node, podIP string, additionalLabels map[string]string) *v1.Pod { podIPs := []v1.PodIP{} if podIP != "" { @@ -293,16 +317,20 @@ func newTPod(nodeName, nodeSubnet, nodeMgtIP, nodeGWIP, podName, podIPs, podMAC, return to } -func (p testPod) populateLogicalSwitchCache(fakeOvn *FakeOVN) { +func (p testPod) populateControllerLogicalSwitchCache(bnc *BaseNetworkController) { gomega.Expect(p.nodeName).NotTo(gomega.Equal("")) subnets := []*net.IPNet{} for _, subnet := range strings.Split(p.nodeSubnet, " ") { subnets = append(subnets, ovntest.MustParseIPNet(subnet)) } - err := fakeOvn.controller.lsManager.AddOrUpdateSwitch(p.nodeName, subnets) + err := bnc.lsManager.AddOrUpdateSwitch(bnc.GetNetworkScopedSwitchName(p.nodeName), subnets) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } +func (p testPod) populateLogicalSwitchCache(fakeOvn *FakeOVN) { + p.populateControllerLogicalSwitchCache(&fakeOvn.controller.BaseNetworkController) +} + func (p testPod) getAnnotationsJson() string { type podRoute struct { Dest string `json:"dest"` @@ -405,14 +433,27 @@ func setPodAnnotations(podObj *v1.Pod, testPod testPod) { } func getDefaultNetExpectedPodsAndSwitches(pods []testPod, nodes []string) []libovsdbtest.TestData { - return getExpectedDataPodsSwitchesPortGroup(pods, nodes, "") + return 
getDefaultNetExpectedDataPodsSwitchesPortGroup(pods, nodes, "") +} + +func getExpectedPodsAndSwitches(netInfo util.NetInfo, pods []testPod, nodes []string) []libovsdbtest.TestData { + return getExpectedDataPodsSwitchesPortGroup(netInfo, pods, nodes, "") +} + +func getDefaultNetExpectedDataPodsSwitchesPortGroup(pods []testPod, nodes []string, namespacedPortGroup string) []libovsdbtest.TestData { + return getExpectedDataPodsSwitchesPortGroup(&util.DefaultNetInfo{}, pods, nodes, namespacedPortGroup) } -func getExpectedDataPodsSwitchesPortGroup(pods []testPod, nodes []string, namespacedPortGroup string) []libovsdbtest.TestData { +func getExpectedDataPodsSwitchesPortGroup(netInfo util.NetInfo, pods []testPod, nodes []string, namespacedPortGroup string) []libovsdbtest.TestData { nodeslsps := make(map[string][]string) var logicalSwitchPorts []*nbdb.LogicalSwitchPort for _, pod := range pods { - portName := util.GetLogicalPortName(pod.namespace, pod.podName) + var portName string + if netInfo.IsDefault() { + portName = util.GetLogicalPortName(pod.namespace, pod.podName) + } else { + portName = util.GetSecondaryNetworkLogicalPortName(pod.namespace, pod.podName, netInfo.GetNADs()[0]) + } var lspUUID string if len(pod.portUUID) == 0 { lspUUID = portName + "-UUID" @@ -437,14 +478,19 @@ func getExpectedDataPodsSwitchesPortGroup(pods []testPod, nodes []string, namesp if pod.noIfaceIdVer { delete(lsp.Options, "iface-id-ver") } + if !netInfo.IsDefault() { + lsp.ExternalIDs["k8s.ovn.org/network"] = netInfo.GetNetworkName() + lsp.ExternalIDs["k8s.ovn.org/nad"] = netInfo.GetNADs()[0] + lsp.ExternalIDs["k8s.ovn.org/topology"] = netInfo.TopologyType() + } logicalSwitchPorts = append(logicalSwitchPorts, lsp) nodeslsps[pod.nodeName] = append(nodeslsps[pod.nodeName], lspUUID) } var logicalSwitches []*nbdb.LogicalSwitch for _, node := range nodes { logicalSwitches = append(logicalSwitches, &nbdb.LogicalSwitch{ - UUID: node + "-UUID", - Name: node, + UUID: netInfo.GetNetworkScopedSwitchName(node) + "-UUID", + Name: netInfo.GetNetworkScopedSwitchName(node), Ports: nodeslsps[node], }) } @@ -574,7 +620,7 @@ var _ = ginkgo.Describe("OVN Pod Operations", func() { }, 2).Should(gomega.MatchJSON(t.getAnnotationsJson())) gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData( - getExpectedDataPodsSwitchesPortGroup([]testPod{t}, []string{"node1"}, namespaceT.Name))) + getDefaultNetExpectedDataPodsSwitchesPortGroup([]testPod{t}, []string{"node1"}, namespaceT.Name))) return nil } @@ -619,8 +665,8 @@ var _ = ginkgo.Describe("OVN Pod Operations", func() { err = fakeOvn.controller.WatchPods() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - pod, _ := fakeOvn.fakeClient.KubeClient.CoreV1().Pods(t.namespace).Get(context.TODO(), t.podName, metav1.GetOptions{}) - gomega.Expect(pod).To(gomega.BeNil()) + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(t.namespace).Get(context.TODO(), t.podName, metav1.GetOptions{}) + gomega.Expect(err).To(gomega.MatchError(kapierrors.IsNotFound, "IsNotFound")) _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(t.namespace).Create(context.TODO(), newPod(t.namespace, t.podName, t.nodeName, t.podIP), metav1.CreateOptions{}) @@ -674,8 +720,8 @@ var _ = ginkgo.Describe("OVN Pod Operations", func() { err = fakeOvn.controller.WatchPods() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - pod, _ := fakeOvn.fakeClient.KubeClient.CoreV1().Pods(t.namespace).Get(context.TODO(), t.podName, metav1.GetOptions{}) - gomega.Expect(pod).To(gomega.BeNil()) + _, err = 
fakeOvn.fakeClient.KubeClient.CoreV1().Pods(t.namespace).Get(context.TODO(), t.podName, metav1.GetOptions{}) + gomega.Expect(err).To(gomega.MatchError(kapierrors.IsNotFound, "IsNotFound")) myPod := newPod(t.namespace, t.podName, t.nodeName, t.podIP) _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(t.namespace).Create(context.TODO(), @@ -786,8 +832,8 @@ var _ = ginkgo.Describe("OVN Pod Operations", func() { err = fakeOvn.controller.WatchPods() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - pod, _ := fakeOvn.fakeClient.KubeClient.CoreV1().Pods(t.namespace).Get(context.TODO(), t.podName, metav1.GetOptions{}) - gomega.Expect(pod).To(gomega.BeNil()) + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(t.namespace).Get(context.TODO(), t.podName, metav1.GetOptions{}) + gomega.Expect(err).To(gomega.MatchError(kapierrors.IsNotFound, "IsNotFound")) myPod := newPod(t.namespace, t.podName, t.nodeName, t.podIP) _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(t.namespace).Create(context.TODO(), @@ -1008,8 +1054,8 @@ var _ = ginkgo.Describe("OVN Pod Operations", func() { err = fakeOvn.controller.WatchPods() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - pod, _ := fakeOvn.fakeClient.KubeClient.CoreV1().Pods(t.namespace).Get(context.TODO(), t.podName, metav1.GetOptions{}) - gomega.Expect(pod).To(gomega.BeNil()) + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(t.namespace).Get(context.TODO(), t.podName, metav1.GetOptions{}) + gomega.Expect(err).To(gomega.MatchError(kapierrors.IsNotFound, "IsNotFound")) podObj := &v1.Pod{ Spec: v1.PodSpec{NodeName: "node1"}, @@ -1404,10 +1450,9 @@ var _ = ginkgo.Describe("OVN Pod Operations", func() { fakeOvn.controller.retryPods.RequestRetryObjs() // check that the pod is not in API server - pod, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(podTest.namespace).Get( + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(podTest.namespace).Get( context.TODO(), podTest.podName, metav1.GetOptions{}) - gomega.Expect(err).To(gomega.HaveOccurred()) - gomega.Expect(pod).To(gomega.BeNil()) + gomega.Expect(err).To(gomega.MatchError(kapierrors.IsNotFound, "IsNotFound")) // check that the retry cache no longer has the entry retry.CheckRetryObjectEventually(key, false, fakeOvn.controller.retryPods) @@ -1602,9 +1647,8 @@ var _ = ginkgo.Describe("OVN Pod Operations", func() { err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(t.namespace).Delete(context.TODO(), t.podName, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - pod, err := fakeOvn.fakeClient.KubeClient.CoreV1().Pods(t.namespace).Get(context.TODO(), t.podName, metav1.GetOptions{}) - gomega.Expect(err).To(gomega.HaveOccurred()) - gomega.Expect(pod).To(gomega.BeNil()) + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(t.namespace).Get(context.TODO(), t.podName, metav1.GetOptions{}) + gomega.Expect(err).To(gomega.MatchError(kapierrors.IsNotFound, "IsNotFound")) gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(getDefaultNetExpectedPodsAndSwitches([]testPod{}, []string{"node1"}))) return nil diff --git a/go-controller/pkg/ovn/policy_stale_test.go b/go-controller/pkg/ovn/policy_stale_test.go index 4d6a4e8c7c..b732e24b7e 100644 --- a/go-controller/pkg/ovn/policy_stale_test.go +++ b/go-controller/pkg/ovn/policy_stale_test.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" @@ -314,7 +314,7 @@ var _ = ginkgo.Describe("OVN Stale 
NetworkPolicy Operations", func() { staleAddrSetIDs := getStaleNetpolAddrSetDbIDs(networkPolicy.Namespace, networkPolicy.Name, "egress", "0", DefaultNetworkControllerName) localASName, _ := addressset.GetHashNamesForAS(staleAddrSetIDs) - peerASName, _ := getNsAddrSetHashNames(namespace2.Name) + peerASName, _ := getDefaultNetNsAddrSetHashNames(namespace2.Name) fakeController := getFakeController(DefaultNetworkControllerName) pgName := fakeController.getNetworkPolicyPGName(networkPolicy.Namespace, networkPolicy.Name) initialData := getPolicyData(newNetpolDataParams(networkPolicy).withPeerNamespaces(namespace2.Name)) diff --git a/go-controller/pkg/ovn/policy_test.go b/go-controller/pkg/ovn/policy_test.go index bdcc7bfca8..4bb6363d07 100644 --- a/go-controller/pkg/ovn/policy_test.go +++ b/go-controller/pkg/ovn/policy_test.go @@ -8,8 +8,8 @@ import ( "sort" "time" - "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/extensions/table" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" "github.com/onsi/gomega/format" "github.com/urfave/cli/v2" @@ -451,8 +451,13 @@ func getNamespaceWithMultiplePoliciesExpectedData(networkPolicies []*knet.Networ } func getHairpinningACLsV4AndPortGroup() []libovsdbtest.TestData { - clusterPortGroup := newClusterPortGroup() - fakeController := getFakeController(DefaultNetworkControllerName) + return getHairpinningACLsV4AndPortGroupForNetwork(&util.DefaultNetInfo{}, nil) +} + +func getHairpinningACLsV4AndPortGroupForNetwork(netInfo util.NetInfo, ports []string) []libovsdbtest.TestData { + controllerName := getNetworkControllerName(netInfo.GetNetworkName()) + clusterPortGroup := newNetworkClusterPortGroup(netInfo) + fakeController := getFakeController(controllerName) egressIDs := fakeController.getNetpolDefaultACLDbIDs("Egress") egressACL := libovsdbops.BuildACL( "", @@ -469,7 +474,7 @@ func getHairpinningACLsV4AndPortGroup() []libovsdbtest.TestData { }, types.DefaultACLTier, ) - egressACL.UUID = "hairpinning-egress-UUID" + egressACL.UUID = fmt.Sprintf("hp-egress-%s", controllerName) ingressIDs := fakeController.getNetpolDefaultACLDbIDs("Ingress") ingressACL := libovsdbops.BuildACL( "", @@ -484,8 +489,9 @@ func getHairpinningACLsV4AndPortGroup() []libovsdbtest.TestData { nil, types.DefaultACLTier, ) - ingressACL.UUID = "hairpinning-ingress-UUID" + ingressACL.UUID = fmt.Sprintf("hp-ingress-%s", controllerName) clusterPortGroup.ACLs = []string{egressACL.UUID, ingressACL.UUID} + clusterPortGroup.Ports = ports return []libovsdbtest.TestData{egressACL, ingressACL, clusterPortGroup} } @@ -949,7 +955,7 @@ var _ = ginkgo.Describe("OVN NetworkPolicy Operations", func() { }) ginkgo.Context("during execution", func() { - table.DescribeTable("correctly uses namespace and shared peer selector address sets", + ginkgo.DescribeTable("correctly uses namespace and shared peer selector address sets", func(peer knet.NetworkPolicyPeer, peerNamespaces []string) { namespace1 := *newNamespace(namespaceName1) namespace2 := *newNamespace(namespaceName2) @@ -974,11 +980,11 @@ var _ = ginkgo.Describe("OVN NetworkPolicy Operations", func() { } gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedData...)) }, - table.Entry("empty pod selector => use pod selector", + ginkgo.Entry("empty pod selector => use pod selector", knet.NetworkPolicyPeer{ PodSelector: &metav1.LabelSelector{}, }, nil), - table.Entry("namespace selector with nil pod selector => use a set of selected namespace address sets", + ginkgo.Entry("namespace selector with nil pod selector => use a set of 
selected namespace address sets", knet.NetworkPolicyPeer{ NamespaceSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -986,12 +992,12 @@ var _ = ginkgo.Describe("OVN NetworkPolicy Operations", func() { }, }, }, []string{namespaceName2}), - table.Entry("empty namespace and pod selector => use all pods shared address set", + ginkgo.Entry("empty namespace and pod selector => use all pods shared address set", knet.NetworkPolicyPeer{ PodSelector: &metav1.LabelSelector{}, NamespaceSelector: &metav1.LabelSelector{}, }, nil), - table.Entry("pod selector with nil namespace => use static namespace+pod selector", + ginkgo.Entry("pod selector with nil namespace => use static namespace+pod selector", knet.NetworkPolicyPeer{ PodSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -999,7 +1005,7 @@ var _ = ginkgo.Describe("OVN NetworkPolicy Operations", func() { }, }, }, nil), - table.Entry("pod selector with namespace selector => use namespace selector+pod selector", + ginkgo.Entry("pod selector with namespace selector => use namespace selector+pod selector", knet.NetworkPolicyPeer{ PodSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -1012,7 +1018,7 @@ var _ = ginkgo.Describe("OVN NetworkPolicy Operations", func() { }, }, }, nil), - table.Entry("pod selector with empty namespace selector => use global pod selector", + ginkgo.Entry("pod selector with empty namespace selector => use global pod selector", knet.NetworkPolicyPeer{ PodSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -1936,7 +1942,7 @@ var _ = ginkgo.Describe("OVN NetworkPolicy Operations", func() { originalNamespace.Annotations = map[string]string{util.AclLoggingAnnotation: originalACLLogSeverity} }) - table.DescribeTable("ACL logging for network policies reacts to severity updates", func(networkPolicies ...*knet.NetworkPolicy) { + ginkgo.DescribeTable("ACL logging for network policies reacts to severity updates", func(networkPolicies ...*knet.NetworkPolicy) { ginkgo.By("Provisioning the system with an initial empty policy, we know deterministically the names of the default deny ACLs") initialDenyAllPolicy := newNetworkPolicy("emptyPol", namespaceName1, metav1.LabelSelector{}, nil, nil) // originalACLLogSeverity.Deny == nbdb.ACLSeverityAlert @@ -2000,12 +2006,12 @@ var _ = ginkgo.Describe("OVN NetworkPolicy Operations", func() { } gomega.Expect(app.Run([]string{app.Name})).To(gomega.Succeed()) }, - table.Entry("when the namespace features a network policy with a single rule", + ginkgo.Entry("when the namespace features a network policy with a single rule", getMatchLabelsNetworkPolicy(netPolicyName1, namespaceName1, namespaceName2, "", true, false)), - table.Entry("when the namespace features *multiple* network policies with a single rule", + ginkgo.Entry("when the namespace features *multiple* network policies with a single rule", getMatchLabelsNetworkPolicy(netPolicyName1, namespaceName1, namespaceName2, "", true, false), getMatchLabelsNetworkPolicy(netPolicyName2, namespaceName1, namespaceName2, "", false, true)), - table.Entry("when the namespace features a network policy with *multiple* rules", + ginkgo.Entry("when the namespace features a network policy with *multiple* rules", getMatchLabelsNetworkPolicy(netPolicyName1, namespaceName1, namespaceName2, "tiny-winy-pod", true, false))) ginkgo.It("policies created after namespace logging level updates inherit updated logging level", func() { diff --git a/go-controller/pkg/ovn/port_cache.go 
b/go-controller/pkg/ovn/port_cache.go index f4a0dbe3ba..84cde9b414 100644 --- a/go-controller/pkg/ovn/port_cache.go +++ b/go-controller/pkg/ovn/port_cache.go @@ -13,7 +13,7 @@ import ( "k8s.io/klog/v2" ) -type portCache struct { +type PortCache struct { sync.RWMutex stopChan <-chan struct{} @@ -33,14 +33,14 @@ type lpInfo struct { expires time.Time } -func newPortCache(stopChan <-chan struct{}) *portCache { - return &portCache{ +func NewPortCache(stopChan <-chan struct{}) *PortCache { + return &PortCache{ stopChan: stopChan, cache: make(map[string]map[string]*lpInfo), } } -func (c *portCache) get(pod *kapi.Pod, nadName string) (*lpInfo, error) { +func (c *PortCache) get(pod *kapi.Pod, nadName string) (*lpInfo, error) { var logicalPort string podName := fmt.Sprintf("%s/%s", pod.Namespace, pod.Name) @@ -60,7 +60,7 @@ func (c *portCache) get(pod *kapi.Pod, nadName string) (*lpInfo, error) { return nil, fmt.Errorf("logical port %s for pod %s not found in cache", podName, logicalPort) } -func (c *portCache) getAll(pod *kapi.Pod) (map[string]*lpInfo, error) { +func (c *PortCache) getAll(pod *kapi.Pod) (map[string]*lpInfo, error) { podName := fmt.Sprintf("%s/%s", pod.Namespace, pod.Name) c.RLock() defer c.RUnlock() @@ -75,7 +75,7 @@ func (c *portCache) getAll(pod *kapi.Pod) (map[string]*lpInfo, error) { return nil, fmt.Errorf("logical port cache for pod %s not found", podName) } -func (c *portCache) add(pod *kapi.Pod, logicalSwitch, nadName, uuid string, mac net.HardwareAddr, ips []*net.IPNet) *lpInfo { +func (c *PortCache) add(pod *kapi.Pod, logicalSwitch, nadName, uuid string, mac net.HardwareAddr, ips []*net.IPNet) *lpInfo { var logicalPort string podName := fmt.Sprintf("%s/%s", pod.Namespace, pod.Name) @@ -105,7 +105,7 @@ func (c *portCache) add(pod *kapi.Pod, logicalSwitch, nadName, uuid string, mac return portInfo } -func (c *portCache) remove(pod *kapi.Pod, nadName string) { +func (c *PortCache) remove(pod *kapi.Pod, nadName string) { var logicalPort string podName := fmt.Sprintf("%s/%s", pod.Namespace, pod.Name) diff --git a/go-controller/pkg/ovn/secondary_layer2_network_controller.go b/go-controller/pkg/ovn/secondary_layer2_network_controller.go index a251084722..c69cafbbab 100644 --- a/go-controller/pkg/ovn/secondary_layer2_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer2_network_controller.go @@ -5,16 +5,21 @@ import ( "fmt" "net" "reflect" + "strconv" + "strings" "sync" "time" - mnpapi "github.com/k8snetworkplumbingwg/multi-networkpolicy/pkg/apis/k8s.cni.cncf.io/v1beta1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/pod" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/udn" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + nad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" + svccontroller "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/services" lsm "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/logical_switch_manager" zoneinterconnect "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/zone_interconnect" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/persistentips" @@ -61,27 +66,22 @@ func (h 
*secondaryLayer2NetworkControllerEventHandler) GetResourceFromInformerCa // RecordAddEvent records the add event on this given object. func (h *secondaryLayer2NetworkControllerEventHandler) RecordAddEvent(obj interface{}) { - switch h.objType { - case factory.MultiNetworkPolicyType: - mnp := obj.(*mnpapi.MultiNetworkPolicy) - klog.V(5).Infof("Recording add event on multinetwork policy %s/%s", mnp.Namespace, mnp.Name) - metrics.GetConfigDurationRecorder().Start("multinetworkpolicy", mnp.Namespace, mnp.Name) - } + h.baseHandler.recordAddEvent(h.objType, obj) } // RecordUpdateEvent records the update event on this given object. func (h *secondaryLayer2NetworkControllerEventHandler) RecordUpdateEvent(obj interface{}) { - h.baseHandler.recordAddEvent(h.objType, obj) + h.baseHandler.recordUpdateEvent(h.objType, obj) } // RecordDeleteEvent records the delete event on this given object. func (h *secondaryLayer2NetworkControllerEventHandler) RecordDeleteEvent(obj interface{}) { - h.baseHandler.recordAddEvent(h.objType, obj) + h.baseHandler.recordDeleteEvent(h.objType, obj) } // RecordSuccessEvent records the success event on this given object. func (h *secondaryLayer2NetworkControllerEventHandler) RecordSuccessEvent(obj interface{}) { - h.baseHandler.recordAddEvent(h.objType, obj) + h.baseHandler.recordSuccessEvent(h.objType, obj) } // RecordErrorEvent records the error event on this given object. @@ -115,7 +115,7 @@ func (h *secondaryLayer2NetworkControllerEventHandler) AddResource(obj interface } return h.oc.addUpdateLocalNodeEvent(node, nodeParams) } - return h.oc.addUpdateRemoteNodeEvent(node) + return h.oc.addUpdateRemoteNodeEvent(node, config.OVNKubernetesFeature.EnableInterconnect) default: return h.oc.AddSecondaryNetworkResourceCommon(h.objType, obj) } @@ -153,20 +153,22 @@ func (h *secondaryLayer2NetworkControllerEventHandler) UpdateResource(oldObj, ne return fmt.Errorf("could not cast oldObj of type %T to *kapi.Node", oldObj) } newNodeIsLocalZoneNode := h.oc.isLocalZoneNode(newNode) - nodeSubnetChanged := nodeSubnetChanged(oldNode, newNode) + nodeSubnetChanged := nodeSubnetChanged(oldNode, newNode, h.oc.NetInfo.GetNetworkName()) if newNodeIsLocalZoneNode { var nodeSyncsParam *nodeSyncs if h.oc.isLocalZoneNode(oldNode) { - // determine what actually changed in this update - syncMgmtPort := macAddressChanged(oldNode, newNode, h.oc.NetInfo.GetNetworkName()) || nodeSubnetChanged - + // determine what actually changed in this update and combine that with what failed previously + _, mgmtUpdateFailed := h.oc.mgmtPortFailed.Load(newNode.Name) + shouldSyncMgmtPort := mgmtUpdateFailed || + macAddressChanged(oldNode, newNode, h.oc.NetInfo.GetNetworkName()) || + nodeSubnetChanged _, gwUpdateFailed := h.oc.gatewaysFailed.Load(newNode.Name) shouldSyncGW := gwUpdateFailed || gatewayChanged(oldNode, newNode) || hostCIDRsChanged(oldNode, newNode) || nodeGatewayMTUSupportChanged(oldNode, newNode) - nodeSyncsParam = &nodeSyncs{syncMgmtPort: syncMgmtPort, syncGw: shouldSyncGW} + nodeSyncsParam = &nodeSyncs{syncMgmtPort: shouldSyncMgmtPort, syncGw: shouldSyncGW} } else { klog.Infof("Node %s moved from the remote zone %s to local zone %s.", newNode.Name, util.GetNodeZone(oldNode), util.GetNodeZone(newNode)) @@ -176,7 +178,8 @@ func (h *secondaryLayer2NetworkControllerEventHandler) UpdateResource(oldObj, ne return h.oc.addUpdateLocalNodeEvent(newNode, nodeSyncsParam) } else { - return h.oc.addUpdateRemoteNodeEvent(newNode) + _, syncZoneIC := h.oc.syncZoneICFailed.Load(newNode.Name) + return
h.oc.addUpdateRemoteNodeEvent(newNode, syncZoneIC) } default: return h.oc.UpdateSecondaryNetworkResourceCommon(h.objType, oldObj, newObj, inRetryCache) @@ -200,6 +203,9 @@ func (h *secondaryLayer2NetworkControllerEventHandler) SyncFunc(objs []interface case factory.NamespaceType: syncFunc = h.oc.syncNamespaces + case factory.PolicyType: + syncFunc = h.oc.syncNetworkPolicies + case factory.MultiNetworkPolicyType: syncFunc = h.oc.syncMultiNetworkPolicies @@ -228,17 +234,33 @@ type SecondaryLayer2NetworkController struct { BaseSecondaryLayer2NetworkController // Node-specific syncMaps used by node event handler - mgmtPortFailed sync.Map - gatewaysFailed sync.Map + mgmtPortFailed sync.Map + gatewaysFailed sync.Map + syncZoneICFailed sync.Map // Cluster-wide router default Control Plane Protection (COPP) UUID defaultCOPPUUID string gatewayManagers sync.Map + + // Cluster-wide Load_Balancer_Group UUID. + // Includes the cluster switch and all node gateway routers. + clusterLoadBalancerGroupUUID string + + // Cluster-wide switch Load_Balancer_Group UUID. + // Includes the cluster switch. + switchLoadBalancerGroupUUID string + + // Cluster-wide router Load_Balancer_Group UUID. + // Includes all node gateway routers. + routerLoadBalancerGroupUUID string + + // Controller in charge of services + svcController *svccontroller.Controller } // NewSecondaryLayer2NetworkController creates a new OVN controller for the given secondary layer2 nad func NewSecondaryLayer2NetworkController(cnci *CommonNetworkControllerInfo, netInfo util.NetInfo, nadController nad.NADController) (*SecondaryLayer2NetworkController, error) { stopChan := make(chan struct{}) @@ -249,6 +271,24 @@ func NewSecondaryLayer2NetworkController(cnci *CommonNetworkControllerInfo, netI if netInfo.IsPrimaryNetwork() { lsManagerFactoryFn = lsm.NewL2SwitchManagerForUserDefinedPrimaryNetwork } + + var svcController *svccontroller.Controller + if util.IsNetworkSegmentationSupportEnabled() { + var err error + svcController, err = svccontroller.NewController( + cnci.client, cnci.nbClient, + cnci.watchFactory.ServiceCoreInformer(), + cnci.watchFactory.EndpointSliceCoreInformer(), + cnci.watchFactory.NodeCoreInformer(), + nadController, + cnci.recorder, + netInfo, + ) + if err != nil { + return nil, fmt.Errorf("unable to create new service controller while creating new layer2 network controller: %w", err) + } + } + oc := &SecondaryLayer2NetworkController{ BaseSecondaryLayer2NetworkController: BaseSecondaryLayer2NetworkController{ @@ -258,7 +298,7 @@ func NewSecondaryLayer2NetworkController(cnci *CommonNetworkControllerInfo, netI controllerName: getNetworkControllerName(netInfo.GetNetworkName()), NetInfo: netInfo, lsManager: lsManagerFactoryFn(), - logicalPortCache: newPortCache(stopChan), + logicalPortCache: NewPortCache(stopChan), namespaces: make(map[string]*namespaceInfo), namespacesMutex: sync.Mutex{}, addressSetFactory: addressSetFactory, @@ -269,11 +309,14 @@ func NewSecondaryLayer2NetworkController(cnci *CommonNetworkControllerInfo, netI wg: &sync.WaitGroup{}, localZoneNodes: &sync.Map{}, cancelableCtx: util.NewCancelableContext(), + nadController: nadController, }, }, }, - mgmtPortFailed: sync.Map{}, - gatewayManagers: sync.Map{}, + mgmtPortFailed: sync.Map{}, + syncZoneICFailed: sync.Map{}, + gatewayManagers: sync.Map{}, + svcController: svcController, } if config.OVNKubernetesFeature.EnableInterconnect { @@
-298,12 +341,12 @@ NewSecondaryLayer2NetworkController(cnci *CommonNetworkControllerInfo, netI claimsReconciler) } - // disable multicast support for secondary networks - // TBD: changes needs to be made to support multicast in secondary networks - oc.multicastSupport = false + // enable multicast support only for primary UDNs when multicast is enabled + // TBD: changes need to be made to support multicast beyond primary UDNs + oc.multicastSupport = oc.IsPrimaryNetwork() && util.IsNetworkSegmentationSupportEnabled() && config.EnableMulticast oc.initRetryFramework() - return oc + return oc, nil } // Start starts the secondary layer2 controller, handles all events and creates all needed logical entities @@ -319,11 +362,26 @@ func (oc *SecondaryLayer2NetworkController) Start(ctx context.Context) error { return err } - return oc.run(ctx) + return oc.run() } -func (oc *SecondaryLayer2NetworkController) run(ctx context.Context) error { - return oc.BaseSecondaryLayer2NetworkController.run() +func (oc *SecondaryLayer2NetworkController) run() error { + err := oc.BaseSecondaryLayer2NetworkController.run() + if err != nil { + return err + } + if oc.svcController != nil { + startSvc := time.Now() + + err := oc.StartServiceController(oc.wg, true) + endSvc := time.Since(startSvc) + + metrics.MetricOVNKubeControllerSyncDuration.WithLabelValues("service_" + oc.GetNetworkName()).Set(endSvc.Seconds()) + if err != nil { + return err + } + } + return nil } // Cleanup cleans up logical entities for the given network, called from net-attach-def routine @@ -360,11 +418,36 @@ func (oc *SecondaryLayer2NetworkController) Init() error { } oc.defaultCOPPUUID = defaultCOPPUUID + clusterLBGroupUUID, switchLBGroupUUID, routerLBGroupUUID, err := initLoadBalancerGroups(oc.nbClient, oc.NetInfo) + if err != nil { + return err + } + oc.clusterLoadBalancerGroupUUID = clusterLBGroupUUID + oc.switchLoadBalancerGroupUUID = switchLBGroupUUID + oc.routerLoadBalancerGroupUUID = routerLBGroupUUID + _, err = oc.initializeLogicalSwitch( oc.GetNetworkScopedSwitchName(types.OVNLayer2Switch), oc.Subnets(), oc.ExcludeSubnets(), + oc.clusterLoadBalancerGroupUUID, + oc.switchLoadBalancerGroupUUID, ) + if err != nil { + return err + } + + // Configure cluster port groups and multicast default policies for user defined primary networks. + if oc.IsPrimaryNetwork() && util.IsNetworkSegmentationSupportEnabled() { + if err := oc.setupClusterPortGroups(); err != nil { + return fmt.Errorf("failed to create cluster port groups for network %q: %w", oc.GetNetworkName(), err) + } + + if err := oc.syncDefaultMulticastPolicies(); err != nil { + return fmt.Errorf("failed to sync default multicast policies for network %q: %w", oc.GetNetworkName(), err) + } + } + return err } @@ -380,12 +463,19 @@ func (oc *SecondaryLayer2NetworkController) initRetryFramework() { oc.retryIPAMClaims = oc.newRetryFramework(factory.IPAMClaimsType) } + // When a user-defined network is enabled as the primary network for a namespace, + // watch for namespace and network policy events. + if oc.IsPrimaryNetwork() { + oc.retryNamespaces = oc.newRetryFramework(factory.NamespaceType) + oc.retryNetworkPolicies = oc.newRetryFramework(factory.PolicyType) + } + // For secondary networks, we don't have to watch namespace events if // multi-network policy support is not enabled. We don't support // multi-network policy for IPAM-less secondary networks either.
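Aside: the node handlers earlier in this file pair change detection with per-node failure maps (mgmtPortFailed, gatewaysFailed, syncZoneICFailed): a failed sync is recorded so the next node event retries it even when nothing relevant changed, and success clears the entry. A self-contained sketch of that retry bookkeeping; the names here are illustrative:

import "sync"

var mgmtPortFailed sync.Map // node name -> true while the last sync failed

// syncIfNeeded retries a previously failed node even when nothing changed,
// and clears the bookkeeping once the sync succeeds.
func syncIfNeeded(nodeName string, changed bool, doSync func() error) error {
	_, failedBefore := mgmtPortFailed.Load(nodeName)
	if !changed && !failedBefore {
		return nil // nothing new and no pending retry
	}
	if err := doSync(); err != nil {
		mgmtPortFailed.Store(nodeName, true) // retry on the next node event
		return err
	}
	mgmtPortFailed.Delete(nodeName)
	return nil
}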
if util.IsMultiNetworkPoliciesSupportEnabled() { oc.retryNamespaces = oc.newRetryFramework(factory.NamespaceType) - oc.retryNetworkPolicies = oc.newRetryFramework(factory.MultiNetworkPolicyType) + oc.retryMultiNetworkPolicies = oc.newRetryFramework(factory.MultiNetworkPolicyType) } } @@ -417,22 +507,6 @@ func (oc *SecondaryLayer2NetworkController) addUpdateLocalNodeEvent(node *corev1 var errs []error if util.IsNetworkSegmentationSupportEnabled() && oc.IsPrimaryNetwork() { - if nSyncs.syncMgmtPort { - // Layer 2 networks have a single, large subnet, that's the one - // associated to the controller. Take the management port IP from - // there. - subnets := oc.Subnets() - hostSubnets := make([]*net.IPNet, 0, len(subnets)) - for _, subnet := range oc.Subnets() { - hostSubnets = append(hostSubnets, subnet.CIDR) - } - if _, err := oc.syncNodeManagementPortNoRouteHostSubnets(node, oc.GetNetworkScopedSwitchName(types.OVNLayer2Switch), hostSubnets); err != nil { - errs = append(errs, err) - oc.mgmtPortFailed.Store(node.Name, true) - } else { - oc.mgmtPortFailed.Delete(node.Name) - } - } if nSyncs.syncGw { gwManager := oc.gatewayManagerForNode(node.Name) oc.gatewayManagers.Store(node.Name, gwManager) @@ -448,7 +522,7 @@ func (oc *SecondaryLayer2NetworkController) addUpdateLocalNodeEvent(node *corev1 gwConfig.hostSubnets, nil, gwConfig.hostSubnets, - gwConfig.gwLRPIPs, + gwConfig.gwLRPJoinIPs, // the joinIP allocated to this node for this controller's network oc.SCTPSupport, nil, // no need for ovnClusterLRPToJoinIfAddrs gwConfig.externalIPs, @@ -456,7 +530,30 @@ func (oc *SecondaryLayer2NetworkController) addUpdateLocalNodeEvent(node *corev1 errs = append(errs, err) oc.gatewaysFailed.Store(node.Name, true) } else { - oc.gatewaysFailed.Delete(node.Name) + if err := oc.addUDNClusterSubnetEgressSNAT(gwConfig.hostSubnets, gwManager.gwRouterName, node); err != nil { + errs = append(errs, err) + oc.gatewaysFailed.Store(node.Name, true) + } else { + oc.gatewaysFailed.Delete(node.Name) + } + } + } + } + if util.IsNetworkSegmentationSupportEnabled() && oc.IsPrimaryNetwork() { + if nSyncs.syncMgmtPort { + // Layer 2 networks have a single, large subnet, that's the one + // associated to the controller. Take the management port IP from + // there. + subnets := oc.Subnets() + hostSubnets := make([]*net.IPNet, 0, len(subnets)) + for _, subnet := range oc.Subnets() { + hostSubnets = append(hostSubnets, subnet.CIDR) + } + if _, err := oc.syncNodeManagementPort(node, oc.GetNetworkScopedSwitchName(types.OVNLayer2Switch), oc.GetNetworkScopedGWRouterName(node.Name), hostSubnets); err != nil { + errs = append(errs, err) + oc.mgmtPortFailed.Store(node.Name, true) + } else { + oc.mgmtPortFailed.Delete(node.Name) } } } @@ -471,6 +568,83 @@ func (oc *SecondaryLayer2NetworkController) addUpdateLocalNodeEvent(node *corev1 return err } +func (oc *SecondaryLayer2NetworkController) addUpdateRemoteNodeEvent(node *corev1.Node, syncZoneIC bool) error { + var errs []error + + if util.IsNetworkSegmentationSupportEnabled() && oc.IsPrimaryNetwork() { + if syncZoneIC && config.OVNKubernetesFeature.EnableInterconnect { + if err := oc.addPortForRemoteNodeGR(node); err != nil { + err = fmt.Errorf("failed to add the remote zone node %s's remote LRP, %w", node.Name, err) + errs = append(errs, err) + oc.syncZoneICFailed.Store(node.Name, true) + } else { + oc.syncZoneICFailed.Delete(node.Name) + } + } + } + + errs = append(errs, oc.BaseSecondaryLayer2NetworkController.addUpdateRemoteNodeEvent(node)) + + err := utilerrors.Join(errs...) 
+ if err != nil { + oc.recordNodeErrorEvent(node, err) + } + return err +} + +func (oc *SecondaryLayer2NetworkController) addPortForRemoteNodeGR(node *corev1.Node) error { + nodeJoinSubnetIPs, err := util.ParseNodeGatewayRouterJoinAddrs(node, oc.GetNetworkName()) + if err != nil { + if util.IsAnnotationNotSetError(err) { + // remote node may not have the annotation yet, suppress it + return types.NewSuppressedError(err) + } + return fmt.Errorf("failed to get the node %s join subnet IPs: %w", node.Name, err) + } + if len(nodeJoinSubnetIPs) == 0 { + return fmt.Errorf("annotation on the node %s had empty join subnet IPs", node.Name) + } + + remoteGRPortMac := util.IPAddrToHWAddr(nodeJoinSubnetIPs[0].IP) + var remoteGRPortNetworks []string + for _, ip := range nodeJoinSubnetIPs { + remoteGRPortNetworks = append(remoteGRPortNetworks, ip.String()) + } + + remotePortAddr := remoteGRPortMac.String() + " " + strings.Join(remoteGRPortNetworks, " ") + klog.V(5).Infof("The remote port addresses for node %s in network %s are %s", node.Name, oc.GetNetworkName(), remotePortAddr) + logicalSwitchPort := nbdb.LogicalSwitchPort{ + Name: types.SwitchToRouterPrefix + oc.GetNetworkScopedSwitchName(types.OVNLayer2Switch) + "_" + node.Name, + Type: "remote", + Addresses: []string{remotePortAddr}, + } + logicalSwitchPort.ExternalIDs = map[string]string{ + types.NetworkExternalID: oc.GetNetworkName(), + types.TopologyExternalID: oc.TopologyType(), + types.NodeExternalID: node.Name, + } + tunnelID, err := util.ParseUDNLayer2NodeGRLRPTunnelIDs(node, oc.GetNetworkName()) + if err != nil { + if util.IsAnnotationNotSetError(err) { + // remote node may not have the annotation yet, suppress it + return types.NewSuppressedError(err) + } + // Don't consider this node as cluster-manager has not allocated node id yet. 
+ return fmt.Errorf("failed to fetch tunnelID annotation from the node %s for network %s, err: %w", + node.Name, oc.GetNetworkName(), err) + } + logicalSwitchPort.Options = map[string]string{ + "requested-tnl-key": strconv.Itoa(tunnelID), + "requested-chassis": node.Name, + } + sw := nbdb.LogicalSwitch{Name: oc.GetNetworkScopedSwitchName(types.OVNLayer2Switch)} + err = libovsdbops.CreateOrUpdateLogicalSwitchPortsOnSwitch(oc.nbClient, &sw, &logicalSwitchPort) + if err != nil { + return fmt.Errorf("failed to create port %v on logical switch %q: %v", logicalSwitchPort, sw.Name, err) + } + return nil +} + func (oc *SecondaryLayer2NetworkController) deleteNodeEvent(node *corev1.Node) error { if err := oc.gatewayManagerForNode(node.Name).Cleanup(); err != nil { return fmt.Errorf("failed to cleanup gateway on node %q: %w", node.Name, err) @@ -481,11 +655,40 @@ func (oc *SecondaryLayer2NetworkController) deleteNodeEvent(node *corev1.Node) e return nil } +// addUDNClusterSubnetEgressSNAT adds the SNAT on each node's GR in L2 networks +// snat eth.dst == d6:cf:fd:2c:a6:44 169.254.0.12 10.128.0.0/14 +// snat eth.dst == d6:cf:fd:2c:a6:44 169.254.0.12 2010:100:200::/64 +// these SNATs are required for pod2Egress traffic in LGW mode and pod2SameNode traffic in SGW mode to function properly on UDNs +// SNAT Breakdown: +// match = "eth.dst == d6:cf:fd:2c:a6:44"; the MAC here is the mpX interface MAC address for this UDN +// logicalIP = "10.128.0.0/14"; which is the clustersubnet for this L2 UDN +// externalIP = "169.254.0.12"; which is the masqueradeIP for this L2 UDN +// so all in all we want to condionally SNAT all packets that are coming from pods hosted on this node, +// which are leaving via UDN's mpX interface to the UDN's masqueradeIP. +func (oc *SecondaryLayer2NetworkController) addUDNClusterSubnetEgressSNAT(localPodSubnets []*net.IPNet, routerName string, node *kapi.Node) error { + outputPort := types.GWRouterToJoinSwitchPrefix + routerName + nats, err := oc.buildUDNEgressSNAT(localPodSubnets, outputPort, node) + if err != nil { + return err + } + if len(nats) == 0 { + return nil // nothing to do + } + router := &nbdb.LogicalRouter{ + Name: routerName, + } + if err := libovsdbops.CreateOrUpdateNATs(oc.nbClient, router, nats...); err != nil { + return fmt.Errorf("failed to update SNAT for cluster on router: %q for network %q, error: %w", + routerName, oc.GetNetworkName(), err) + } + return nil +} + type SecondaryL2GatewayConfig struct { - config *util.L3GatewayConfig - hostSubnets []*net.IPNet - gwLRPIPs []*net.IPNet - externalIPs []net.IP + config *util.L3GatewayConfig + hostSubnets []*net.IPNet + gwLRPJoinIPs []*net.IPNet + externalIPs []net.IP } func (oc *SecondaryLayer2NetworkController) nodeGatewayConfig(node *corev1.Node) (*SecondaryL2GatewayConfig, error) { @@ -500,38 +703,20 @@ func (oc *SecondaryLayer2NetworkController) nodeGatewayConfig(node *corev1.Node) return nil, fmt.Errorf("failed to get networkID for network %q: %v", networkName, err) } - var ( - masqIPs []*net.IPNet - v4MasqIP *net.IPNet - v6MasqIP *net.IPNet - ) - - if config.IPv4Mode { - v4MasqIPs, err := udn.AllocateV4MasqueradeIPs(networkID) - if err != nil { - return nil, fmt.Errorf("failed to get v4 masquerade IP, network %s (%d): %v", networkName, networkID, err) - } - v4MasqIP = v4MasqIPs.GatewayRouter - masqIPs = append(masqIPs, v4MasqIP) - } - if config.IPv6Mode { - v6MasqIPs, err := udn.AllocateV6MasqueradeIPs(networkID) - if err != nil { - return nil, fmt.Errorf("failed to get v6 masquerade IP, network %s (%d): %v", 
networkName, networkID, err) - } - v6MasqIP = v6MasqIPs.GatewayRouter - masqIPs = append(masqIPs, v6MasqIP) + masqIPs, err := udn.GetUDNGatewayMasqueradeIPs(networkID) + if err != nil { + return nil, fmt.Errorf("failed to get masquerade IPs, network %s (%d): %v", networkName, networkID, err) } l3GatewayConfig.IPAddresses = append(l3GatewayConfig.IPAddresses, masqIPs...) // Always SNAT to the per network masquerade IP. var externalIPs []net.IP - if config.IPv4Mode && v4MasqIP != nil { - externalIPs = append(externalIPs, v4MasqIP.IP) - } - if config.IPv6Mode && v6MasqIP != nil { - externalIPs = append(externalIPs, v6MasqIP.IP) + for _, masqIP := range masqIPs { + if masqIP == nil { + continue + } + externalIPs = append(externalIPs, masqIP.IP) } // Use the host subnets present in the network attachment definition. @@ -542,25 +727,18 @@ func (oc *SecondaryLayer2NetworkController) nodeGatewayConfig(node *corev1.Node) // at layer2 the GR LRP should be different per node same we do for layer3 // since they should not collide at the distributed switch later on - gwLRPIPs, err := util.ParseNodeGatewayRouterJoinAddrs(node, networkName) + gwLRPJoinIPs, err := util.ParseNodeGatewayRouterJoinAddrs(node, networkName) if err != nil { return nil, fmt.Errorf("failed composing LRP addresses for layer2 network %s: %w", oc.GetNetworkName(), err) } - // At layer2 GR LRP acts as the layer3 ovn_cluster_router so we need - // to configure here the .1 address, this will work only for IC with - // one node per zone, since ARPs for .1 will not go beyond local switch. - for _, subnet := range oc.Subnets() { - gwLRPIPs = append(gwLRPIPs, util.GetNodeGatewayIfAddr(subnet.CIDR)) - } - // Overwrite the primary interface ID with the correct, per-network one. l3GatewayConfig.InterfaceID = oc.GetNetworkScopedExtPortName(l3GatewayConfig.BridgeID, node.Name) return &SecondaryL2GatewayConfig{ - config: l3GatewayConfig, - hostSubnets: hostSubnets, - gwLRPIPs: gwLRPIPs, - externalIPs: externalIPs, + config: l3GatewayConfig, + hostSubnets: hostSubnets, + gwLRPJoinIPs: gwLRPJoinIPs, + externalIPs: externalIPs, }, nil } @@ -572,6 +750,7 @@ func (oc *SecondaryLayer2NetworkController) newGatewayManager(nodeName string) * oc.nbClient, oc.NetInfo, oc.watchFactory, + oc.gatewayOptions()..., ) } @@ -592,3 +771,30 @@ func (oc *SecondaryLayer2NetworkController) gatewayManagerForNode(nodeName strin return gwManager } } + +func (oc *SecondaryLayer2NetworkController) gatewayOptions() []GatewayOption { + var opts []GatewayOption + if oc.clusterLoadBalancerGroupUUID != "" { + opts = append(opts, WithLoadBalancerGroups( + oc.routerLoadBalancerGroupUUID, + oc.clusterLoadBalancerGroupUUID, + oc.switchLoadBalancerGroupUUID, + )) + } + return opts +} + +func (oc *SecondaryLayer2NetworkController) StartServiceController(wg *sync.WaitGroup, runRepair bool) error { + wg.Add(1) + go func() { + defer wg.Done() + useLBGroups := oc.clusterLoadBalancerGroupUUID != "" + // use 5 workers like most of the kubernetes controllers in the kubernetes controller-manager + // do not use LB templates for UDNs - OVN bug https://issues.redhat.com/browse/FDP-988 + err := oc.svcController.Run(5, oc.stopChan, runRepair, useLBGroups, false) + if err != nil { + klog.Errorf("Error running OVN Kubernetes Services controller for network %s: %v", oc.GetNetworkName(), err) + } + }() + return nil +} diff --git a/go-controller/pkg/ovn/secondary_layer2_network_controller_test.go b/go-controller/pkg/ovn/secondary_layer2_network_controller_test.go index 0e579a1bf5..b24c77961b 100644 
--- a/go-controller/pkg/ovn/secondary_layer2_network_controller_test.go +++ b/go-controller/pkg/ovn/secondary_layer2_network_controller_test.go @@ -4,27 +4,53 @@ import ( "context" "fmt" "net" + "strconv" "time" - . "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/urfave/cli/v2" + nadapi "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knet "k8s.io/utils/net" + "k8s.io/utils/ptr" - nadapi "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + kubevirtv1 "kubevirt.io/api/core/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + networkAttachDefController "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/nad" ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) +type lspEnableValue *bool + +var ( + lspEnableNotSpecified lspEnableValue = nil + lspEnableExplicitlyTrue lspEnableValue = ptr.To(true) + lspEnableExplicitlyFalse lspEnableValue = ptr.To(false) +) + +type liveMigrationPodInfo struct { + podPhase v1.PodPhase + annotation map[string]string + creationTimestamp metav1.Time + expectedLspEnabled lspEnableValue +} + +type liveMigrationInfo struct { + vmName string + sourcePodInfo liveMigrationPodInfo + targetPodInfo liveMigrationPodInfo +} + var _ = Describe("OVN Multi-Homed pod operations for layer2 network", func() { var ( app *cli.App @@ -52,69 +78,21 @@ var _ = Describe("OVN Multi-Homed pod operations for layer2 network", func() { fakeOvn.shutdown() }) - table.DescribeTable( + DescribeTable( "reconciles a new", - func(netInfo secondaryNetInfo, testConfig testConfiguration) { - podInfo := dummyL2TestPod(ns, netInfo) - if testConfig.configToOverride != nil { - config.OVNKubernetesFeature = *testConfig.configToOverride - } + func(netInfo secondaryNetInfo, testConfig testConfiguration, gatewayMode config.GatewayMode) { + const podIdx = 0 + podInfo := dummyL2TestPod(ns, netInfo, podIdx) + setupConfig(netInfo, testConfig, gatewayMode) app.Action = func(ctx *cli.Context) error { - By(fmt.Sprintf("creating a network attachment definition for network: %s", netInfo.netName)) - nad, err := newNetworkAttachmentDefinition( - ns, - nadName, - *netInfo.netconf(), - ) - Expect(err).NotTo(HaveOccurred()) - By("setting up the OVN DB without any entities in it") - Expect(netInfo.setupOVNDependencies(&initialDB)).To(Succeed()) + pod := newMultiHomedPod(podInfo, netInfo) const nodeIPv4CIDR = "192.168.126.202/24" By(fmt.Sprintf("Creating a node named %q, with IP: %s", nodeName, nodeIPv4CIDR)) testNode, err := newNodeWithSecondaryNets(nodeName, nodeIPv4CIDR, netInfo) Expect(err).NotTo(HaveOccurred()) - fakeOvn.startWithDBSetup( - initialDB, - &v1.NamespaceList{ - Items: []v1.Namespace{ - *newNamespace(ns), - }, - }, - &v1.NodeList{Items: []v1.Node{*testNode}}, - &v1.PodList{ - Items: []v1.Pod{ - *newMultiHomedPod(podInfo.namespace, podInfo.podName, podInfo.nodeName, podInfo.podIP, netInfo), - }, - }, - &nadapi.NetworkAttachmentDefinitionList{ - Items: 
[]nadapi.NetworkAttachmentDefinition{*nad}, - }, - ) - podInfo.populateLogicalSwitchCache(fakeOvn) - - // on IC, the test itself spits out the pod with the - // annotations set, since on production it would be the - // clustermanager to annotate the pod. - if !config.OVNKubernetesFeature.EnableInterconnect { - By("asserting the pod originally does *not* feature the OVN pod networks annotation") - // pod exists, networks annotations don't - pod, err := fakeOvn.fakeClient.KubeClient.CoreV1().Pods(podInfo.namespace).Get(context.Background(), podInfo.podName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - _, ok := pod.Annotations[util.OvnPodAnnotationName] - Expect(ok).To(BeFalse()) - } - Expect(fakeOvn.controller.WatchNamespaces()).NotTo(HaveOccurred()) - Expect(fakeOvn.controller.WatchPods()).NotTo(HaveOccurred()) - By("asserting the pod (once reconciled) *features* the OVN pod networks annotation") - secondaryNetController, ok := fakeOvn.secondaryControllers[secondaryNetworkName] - Expect(ok).To(BeTrue()) - - secondaryNetController.bnc.ovnClusterLRPToJoinIfAddrs = dummyJoinIPs() - podInfo.populateSecondaryNetworkLogicalSwitchCache(fakeOvn, secondaryNetController) - Expect(secondaryNetController.bnc.WatchNodes()).To(Succeed()) - Expect(secondaryNetController.bnc.WatchPods()).To(Succeed()) + Expect(setupFakeOvnForLayer2Topology(fakeOvn, initialDB, netInfo, testNode, podInfo, pod)).To(Succeed()) // for layer2 on interconnect, it is the cluster manager that // allocates the OVN annotation; on unit tests, this just @@ -137,6 +115,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer2 network", func() { Expect(err).NotTo(HaveOccurred()) Expect(gwConfig.NextHops).NotTo(BeEmpty()) expectationOptions = append(expectationOptions, withGatewayConfig(gwConfig)) + expectationOptions = append(expectationOptions, withClusterPortGroup()) } By("asserting the OVN entities provisioned in the NBDB are the expected ones") Eventually(fakeOvn.nbClient).Should( @@ -145,46 +124,226 @@ var _ = Describe("OVN Multi-Homed pod operations for layer2 network", func() { fakeOvn, []testPod{podInfo}, expectationOptions..., - ).expectedLogicalSwitchesAndPorts()...)) + ).expectedLogicalSwitchesAndPorts(netInfo.isPrimary)...)) return nil } Expect(app.Run([]string{app.Name})).To(Succeed()) }, - table.Entry("pod on a user defined secondary network", + Entry("pod on a user defined secondary network", dummySecondaryLayer2UserDefinedNetwork("100.200.0.0/16"), nonICClusterTestConfiguration(), + config.GatewayModeShared, ), - table.Entry("pod on a user defined primary network on an IC cluster", + Entry("pod on a user defined primary network", dummyPrimaryLayer2UserDefinedNetwork("100.200.0.0/16"), + nonICClusterTestConfiguration(), + config.GatewayModeShared, + ), + + Entry("pod on a user defined secondary network on an IC cluster", + dummySecondaryLayer2UserDefinedNetwork("100.200.0.0/16"), + icClusterTestConfiguration(), + config.GatewayModeShared, + ), + + Entry("pod on a user defined primary network on an IC cluster", + dummyPrimaryLayer2UserDefinedNetwork("100.200.0.0/16"), + icClusterTestConfiguration(), + config.GatewayModeShared, + ), + + Entry("pod on a user defined primary network on an IC cluster; LGW", + dummyPrimaryLayer2UserDefinedNetwork("100.200.0.0/16"), + icClusterTestConfiguration(), + config.GatewayModeLocal, + ), + + Entry("pod on a user defined primary network on an IC cluster with per-pod SNATs enabled", + dummyPrimaryLayer2UserDefinedNetwork("100.200.0.0/16"), + 
icClusterTestConfiguration(func(testConfig *testConfiguration) { + testConfig.gatewayConfig = &config.GatewayConfig{DisableSNATMultipleGWs: true} + }), + config.GatewayModeShared, + ), + /** FIXME: tests do not support ipv6 yet + Entry("pod on a IPv6 user defined primary network on an IC cluster with per-pod SNATs enabled", + dummyPrimaryLayer2UserDefinedNetwork("2001:db8:abcd:0012::/64"), + icClusterWithDisableSNATTestConfiguration(), + config.GatewayModeShared, + ), + */ + ) + + DescribeTable( + "reconciles a new kubevirt-related pod during its live-migration phases", + func(netInfo secondaryNetInfo, testConfig testConfiguration, migrationInfo *liveMigrationInfo) { + const ( + sourcePodInfoIdx = 0 + targetPodInfoIdx = 1 + ) + sourcePodInfo := dummyL2TestPod(ns, netInfo, sourcePodInfoIdx) + setupConfig(netInfo, testConfig, config.GatewayModeShared) + app.Action = func(ctx *cli.Context) error { + sourcePod := newMultiHomedKubevirtPod( + migrationInfo.vmName, + migrationInfo.sourcePodInfo, + sourcePodInfo, + netInfo) + + const nodeIPv4CIDR = "192.168.126.202/24" + By(fmt.Sprintf("Creating a node named %q, with IP: %s", nodeName, nodeIPv4CIDR)) + testNode, err := newNodeWithSecondaryNets(nodeName, nodeIPv4CIDR, netInfo) + Expect(err).NotTo(HaveOccurred()) + + Expect(setupFakeOvnForLayer2Topology(fakeOvn, initialDB, netInfo, testNode, sourcePodInfo, sourcePod)).To(Succeed()) + + // for layer2 on interconnect, it is the cluster manager that + // allocates the OVN annotation; on unit tests, this just + // doesn't happen, and we create the pod with these annotations + // set. Hence, no point checking they're the expected ones. + // TODO: align the mocked annotations with the production code + // - currently missing setting the routes. + if !config.OVNKubernetesFeature.EnableInterconnect { + By("asserting the pod OVN pod networks annotation are the expected ones") + // check that after start networks annotations and nbdb will be updated + Eventually(func() string { + return getPodAnnotations(fakeOvn.fakeClient.KubeClient, sourcePodInfo.namespace, sourcePodInfo.podName) + }).WithTimeout(2 * time.Second).Should(MatchJSON(sourcePodInfo.getAnnotationsJson())) + } + + expectationOptions := testConfig.expectationOptions + if netInfo.isPrimary { + By("configuring the expectation machine with the GW related configuration") + gwConfig, err := util.ParseNodeL3GatewayAnnotation(testNode) + Expect(err).NotTo(HaveOccurred()) + Expect(gwConfig.NextHops).NotTo(BeEmpty()) + expectationOptions = append(expectationOptions, withGatewayConfig(gwConfig)) + expectationOptions = append(expectationOptions, withClusterPortGroup()) + } + By("asserting the OVN entities provisioned in the NBDB are the expected ones before migration started") + Eventually(fakeOvn.nbClient).Should( + libovsdbtest.HaveData( + newSecondaryNetworkExpectationMachine( + fakeOvn, + []testPod{sourcePodInfo}, + expectationOptions..., + ).expectedLogicalSwitchesAndPorts(netInfo.isPrimary)...)) + + targetPodInfo := dummyL2TestPod(ns, netInfo, targetPodInfoIdx) + targetKvPod := newMultiHomedKubevirtPod( + migrationInfo.vmName, + migrationInfo.targetPodInfo, + targetPodInfo, + netInfo) + + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(targetKvPod.Namespace).Create(context.Background(), targetKvPod, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("asserting the OVN entities provisioned in the NBDB are the expected ones after migration") + expectedPodLspEnabled := map[string]*bool{} + expectedPodLspEnabled[sourcePodInfo.podName] = 
migrationInfo.sourcePodInfo.expectedLspEnabled + + testPods := []testPod{sourcePodInfo} + if !util.PodCompleted(targetKvPod) { + testPods = append(testPods, targetPodInfo) + expectedPodLspEnabled[targetPodInfo.podName] = migrationInfo.targetPodInfo.expectedLspEnabled + } + Eventually(fakeOvn.nbClient).Should( + libovsdbtest.HaveData( + newSecondaryNetworkExpectationMachine( + fakeOvn, + testPods, + expectationOptions..., + ).expectedLogicalSwitchesAndPortsWithLspEnabled(netInfo.isPrimary, expectedPodLspEnabled)...)) + return nil + } + + Expect(app.Run([]string{app.Name})).To(Succeed()) + }, + + Entry("on a layer2 topology with user defined secondary network, when target pod is not yet ready", + dummySecondaryLayer2UserDefinedNetwork("100.200.0.0/16"), + nonICClusterTestConfiguration(), + notReadyMigrationInfo(), + ), + + Entry("on a layer2 topology with user defined secondary network, when target pod is ready", + dummySecondaryLayer2UserDefinedNetwork("100.200.0.0/16"), + nonICClusterTestConfiguration(), + readyMigrationInfo(), + ), + + Entry("on a layer2 topology with user defined secondary network and an IC cluster, when target pod is not yet ready", + dummySecondaryLayer2UserDefinedNetwork("100.200.0.0/16"), + icClusterTestConfiguration(), + notReadyMigrationInfo(), + ), + + Entry("on a layer2 topology with user defined secondary network and an IC cluster, when target pod is ready", + dummySecondaryLayer2UserDefinedNetwork("100.200.0.0/16"), icClusterTestConfiguration(), + readyMigrationInfo(), ), - table.Entry("pod on a user defined secondary network", + Entry("on a layer2 topology with user defined secondary network and an IC cluster, when target pod failed", dummySecondaryLayer2UserDefinedNetwork("100.200.0.0/16"), + icClusterTestConfiguration(), + failedMigrationInfo(), + ), + + Entry("on a layer2 topology with user defined primary network, when target pod is not yet ready", + dummyPrimaryLayer2UserDefinedNetwork("100.200.0.0/16"), nonICClusterTestConfiguration(), + notReadyMigrationInfo(), ), - table.Entry("pod on a user defined primary network on an IC cluster", + Entry("on a layer2 topology with user defined primary network, when target pod is ready", + dummyPrimaryLayer2UserDefinedNetwork("100.200.0.0/16"), + nonICClusterTestConfiguration(), + readyMigrationInfo(), + ), + + Entry("on a layer2 topology with user defined primary network and an IC cluster, when target pod is not yet ready", + dummyPrimaryLayer2UserDefinedNetwork("100.200.0.0/16"), + icClusterTestConfiguration(), + notReadyMigrationInfo(), + ), + + Entry("on a layer2 topology with user defined primary network and an IC cluster, when target pod is ready", dummyPrimaryLayer2UserDefinedNetwork("100.200.0.0/16"), icClusterTestConfiguration(), + readyMigrationInfo(), + ), + + Entry("on a layer2 topology with user defined primary network and an IC cluster, when target pod failed", + dummyPrimaryLayer2UserDefinedNetwork("100.200.0.0/16"), + icClusterTestConfiguration(), + failedMigrationInfo(), ), ) - table.DescribeTable( + DescribeTable( "the gateway is properly cleaned up", func(netInfo secondaryNetInfo, testConfig testConfiguration) { podInfo := dummyTestPod(ns, netInfo) if testConfig.configToOverride != nil { config.OVNKubernetesFeature = *testConfig.configToOverride + if testConfig.gatewayConfig != nil { + config.Gateway.DisableSNATMultipleGWs = testConfig.gatewayConfig.DisableSNATMultipleGWs + } } app.Action = func(ctx *cli.Context) error { netConf := netInfo.netconf() networkConfig, err := util.NewNetInfo(netConf) 
Expect(err).NotTo(HaveOccurred()) + nadController := &nad.FakeNADController{ + PrimaryNetworks: map[string]util.NetInfo{}, + } + nadController.PrimaryNetworks[ns] = networkConfig nad, err := newNetworkAttachmentDefinition( ns, nadName, @@ -199,6 +358,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer2 network", func() { gwConfig, err := util.ParseNodeL3GatewayAnnotation(testNode) Expect(err).NotTo(HaveOccurred()) Expect(gwConfig.NextHops).NotTo(BeEmpty()) + nbZone := &nbdb.NBGlobal{Name: ovntypes.OvnDefaultZone, UUID: ovntypes.OvnDefaultZone} if netInfo.isPrimary { gwConfig, err := util.ParseNodeL3GatewayAnnotation(testNode) @@ -207,6 +367,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer2 network", func() { initialDB.NBData, expectedLayer2EgressEntities(networkConfig, *gwConfig, nodeName)...) } + initialDB.NBData = append(initialDB.NBData, nbZone) fakeOvn.startWithDBSetup( initialDB, @@ -220,7 +381,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer2 network", func() { }, &v1.PodList{ Items: []v1.Pod{ - *newMultiHomedPod(podInfo.namespace, podInfo.podName, podInfo.nodeName, podInfo.podIP, netInfo), + *newMultiHomedPod(podInfo, netInfo), }, }, &nadapi.NetworkAttachmentDefinitionList{ @@ -262,31 +423,47 @@ var _ = Describe("OVN Multi-Homed pod operations for layer2 network", func() { &secondaryNetController.bnc.CommonNetworkControllerInfo, networkConfig, nodeName, + nadController, ).Cleanup()).To(Succeed()) - Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData([]libovsdbtest.TestData{})) + Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData([]libovsdbtest.TestData{nbZone})) return nil } Expect(app.Run([]string{app.Name})).To(Succeed()) }, - table.Entry("pod on a user defined primary network", + Entry("pod on a user defined primary network", dummyLayer2PrimaryUserDefinedNetwork("192.168.0.0/16"), nonICClusterTestConfiguration(), ), - table.Entry("pod on a user defined primary network on an interconnect cluster", + Entry("pod on a user defined primary network on an IC cluster", dummyLayer2PrimaryUserDefinedNetwork("192.168.0.0/16"), icClusterTestConfiguration(), ), + Entry("pod on a user defined primary network on an IC cluster with per-pod SNATs enabled", + dummyLayer2PrimaryUserDefinedNetwork("192.168.0.0/16"), + icClusterTestConfiguration(func(testConfig *testConfiguration) { + testConfig.gatewayConfig = &config.GatewayConfig{DisableSNATMultipleGWs: true} + }), + ), ) }) +func dummyLocalnetWithSecondaryUserDefinedNetwork(subnets string) secondaryNetInfo { + return secondaryNetInfo{ + netName: secondaryNetworkName, + nadName: namespacedName(ns, nadName), + topology: ovntypes.LocalnetTopology, + clustersubnets: subnets, + } +} + func dummySecondaryLayer2UserDefinedNetwork(subnets string) secondaryNetInfo { return secondaryNetInfo{ - netName: secondaryNetworkName, - nadName: namespacedName(ns, nadName), - topology: ovntypes.Layer2Topology, - subnets: subnets, + netName: secondaryNetworkName, + nadName: namespacedName(ns, nadName), + topology: ovntypes.Layer2Topology, + clustersubnets: subnets, } } @@ -296,10 +473,11 @@ func dummyPrimaryLayer2UserDefinedNetwork(subnets string) secondaryNetInfo { return secondaryNet } -func dummyL2TestPod(nsName string, info secondaryNetInfo) testPod { +func dummyL2TestPod(nsName string, info secondaryNetInfo, podIdx int) testPod { const nodeSubnet = "10.128.1.0/24" + if info.isPrimary { - pod := newTPod(nodeName, nodeSubnet, "10.128.1.2", "", "myPod", "10.128.1.3", "0a:58:0a:80:01:03", nsName) + pod := 
newTPod(nodeName, nodeSubnet, "10.128.1.2", "", fmt.Sprintf("myPod-%d", podIdx), fmt.Sprintf("10.128.1.%d", podIdx+3), fmt.Sprintf("0a:58:0a:80:01:%0.2d", podIdx+3), nsName) pod.networkRole = "infrastructure-locked" pod.routes = append( pod.routes, @@ -315,11 +493,11 @@ func dummyL2TestPod(nsName string, info secondaryNetInfo) testPod { pod.addNetwork( info.netName, info.nadName, - info.subnets, + info.clustersubnets, "", "100.200.0.1", - "100.200.0.3/16", - "0a:58:64:c8:00:03", + fmt.Sprintf("100.200.0.%d/16", podIdx+3), + fmt.Sprintf("0a:58:64:c8:00:%0.2d", podIdx+3), "primary", 0, []util.PodRoute{ @@ -335,15 +513,15 @@ func dummyL2TestPod(nsName string, info secondaryNetInfo) testPod { ) return pod } - pod := newTPod(nodeName, nodeSubnet, "10.128.1.2", "10.128.1.1", podName, "10.128.1.3", "0a:58:0a:80:01:03", nsName) + pod := newTPod(nodeName, nodeSubnet, "10.128.1.2", "10.128.1.1", fmt.Sprintf("%s-%d", podName, podIdx), fmt.Sprintf("10.128.1.%d", podIdx+3), fmt.Sprintf("0a:58:0a:80:01:%0.2d", podIdx+3), nsName) pod.addNetwork( info.netName, info.nadName, - info.subnets, + info.clustersubnets, "", "", - "100.200.0.1/16", - "0a:58:64:c8:00:01", + fmt.Sprintf("100.200.0.%d/16", podIdx+1), + fmt.Sprintf("0a:58:64:c8:00:%0.2d", podIdx+1), "secondary", 0, []util.PodRoute{}, @@ -351,54 +529,100 @@ func dummyL2TestPod(nsName string, info secondaryNetInfo) testPod { return pod } +func dummyL2TestPodAdditionalNetworkIP() string { + const podIdx = 0 + secNetInfo := dummyPrimaryLayer2UserDefinedNetwork("100.200.0.0/16") + return dummyL2TestPod(ns, secNetInfo, podIdx).getNetworkPortInfo(secNetInfo.netName, secNetInfo.nadName).podIP +} + func expectedLayer2EgressEntities(netInfo util.NetInfo, gwConfig util.L3GatewayConfig, nodeName string) []libovsdbtest.TestData { const ( - nat1 = "nat1-UUID" - nat2 = "nat2-UUID" - nat3 = "nat3-UUID" - sr1 = "sr1-UUID" - sr2 = "sr2-UUID" - routerPolicyUUID1 = "lrp1-UUID" + nat1 = "nat1-UUID" + nat2 = "nat2-UUID" + nat3 = "nat3-UUID" + perPodSNAT = "pod-snat-UUID" + sr1 = "sr1-UUID" + sr2 = "sr2-UUID" + lrsr1 = "lrsr1-UUID" + routerPolicyUUID1 = "lrp1-UUID" + hostCIDRPolicyUUID = "host-cidr-policy-UUID" + masqSNATUUID1 = "masq-snat1-UUID" ) gwRouterName := fmt.Sprintf("GR_%s_test-node", netInfo.GetNetworkName()) staticRouteOutputPort := ovntypes.GWRouterToExtSwitchPrefix + gwRouterName - gwRouterToNetworkSwitchPortName := ovntypes.GWRouterToJoinSwitchPrefix + gwRouterName + gwRouterToNetworkSwitchPortName := ovntypes.RouterToSwitchPrefix + netInfo.GetNetworkScopedName(ovntypes.OVNLayer2Switch) gwRouterToExtSwitchPortName := fmt.Sprintf("%s%s", ovntypes.GWRouterToExtSwitchPrefix, gwRouterName) + masqSNAT := newMasqueradeManagementNATEntry(masqSNATUUID1, "169.254.169.14", layer2Subnet().String(), netInfo) + var nat []string + if config.Gateway.DisableSNATMultipleGWs { + nat = append(nat, nat1, nat3, perPodSNAT, masqSNATUUID1) + } else { + nat = append(nat, nat1, nat2, nat3, masqSNATUUID1) + } + gr := &nbdb.LogicalRouter{ + Name: gwRouterName, + UUID: gwRouterName + "-UUID", + Nat: nat, + Ports: []string{gwRouterToNetworkSwitchPortName + "-UUID", gwRouterToExtSwitchPortName + "-UUID"}, + StaticRoutes: []string{sr1, sr2}, + ExternalIDs: gwRouterExternalIDs(netInfo, gwConfig), + Options: gwRouterOptions(gwConfig), + Policies: []string{routerPolicyUUID1}, + } + gr.Options["lb_force_snat_ip"] = gwRouterJoinIPAddress().IP.String() expectedEntities := []libovsdbtest.TestData{ - &nbdb.LogicalRouter{ - Name: gwRouterName, - UUID: gwRouterName + "-UUID", - Nat: []string{nat1, 
nat2, nat3}, - Ports: []string{gwRouterToNetworkSwitchPortName + "-UUID", gwRouterToExtSwitchPortName + "-UUID"}, - StaticRoutes: []string{sr1, sr2}, - ExternalIDs: gwRouterExternalIDs(netInfo, gwConfig), - Options: gwRouterOptions(gwConfig), - Policies: []string{routerPolicyUUID1}, - }, - expectedGWToNetworkSwitchRouterPort(gwRouterToNetworkSwitchPortName, netInfo, gwRouterIPAddress(), layer2SubnetGWAddr()), + gr, + expectedGWToNetworkSwitchRouterPort(gwRouterToNetworkSwitchPortName, netInfo, gwRouterJoinIPAddress(), layer2SubnetGWAddr()), expectedGRStaticRoute(sr1, dummyMasqueradeSubnet().String(), nextHopMasqueradeIP().String(), nil, &staticRouteOutputPort, netInfo), expectedGRStaticRoute(sr2, ipv4DefaultRoute().String(), nodeGateway().IP.String(), nil, &staticRouteOutputPort, netInfo), - - newNATEntry(nat1, dummyJoinIP().IP.String(), gwRouterIPAddress().IP.String(), standardNonDefaultNetworkExtIDs(netInfo)), - newNATEntry(nat2, dummyJoinIP().IP.String(), layer2Subnet().String(), standardNonDefaultNetworkExtIDs(netInfo)), - newNATEntry(nat3, dummyJoinIP().IP.String(), layer2SubnetGWAddr().IP.String(), standardNonDefaultNetworkExtIDs(netInfo)), - expectedGRToExternalSwitchLRP(gwRouterName, netInfo, nodePhysicalIPAddress(), udnGWSNATAddress()), - expectedStaticMACBinding(gwRouterName, nextHopMasqueradeIP()), - + masqSNAT, expectedLogicalRouterPolicy(routerPolicyUUID1, netInfo, nodeName, nodeIP().IP.String(), managementPortIP(layer2Subnet()).String()), } - for _, entity := range expectedExternalSwitchAndLSPs(netInfo, gwConfig, nodeName) { - expectedEntities = append(expectedEntities, entity) + expectedEntities = append(expectedEntities, expectedStaticMACBindings(gwRouterName, staticMACBindingIPs())...) + + if config.Gateway.Mode == config.GatewayModeLocal { + l2LGWLRP := expectedLogicalRouterPolicy(hostCIDRPolicyUUID, netInfo, nodeName, nodeCIDR().String(), managementPortIP(layer2Subnet()).String()) + l2LGWLRP.Match = fmt.Sprintf(`ip4.dst == %s && ip4.src == %s`, nodeCIDR().String(), layer2Subnet().String()) + l2LGWLRP.Priority, _ = strconv.Atoi(ovntypes.UDNHostCIDRPolicyPriority) + expectedEntities = append(expectedEntities, l2LGWLRP) + gr.Policies = append(gr.Policies, hostCIDRPolicyUUID) + lrsr := expectedGRStaticRoute(lrsr1, layer2Subnet().String(), managementPortIP(layer2Subnet()).String(), + &nbdb.LogicalRouterStaticRoutePolicySrcIP, nil, netInfo) + expectedEntities = append(expectedEntities, lrsr) + gr.StaticRoutes = append(gr.StaticRoutes, lrsr1) + } + + expectedEntities = append(expectedEntities, expectedExternalSwitchAndLSPs(netInfo, gwConfig, nodeName)...) 
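+ // A short orientation sketch (using the UUID placeholders declared above):
+ // the NAT set asserted below depends on the SNAT mode. With
+ // DisableSNATMultipleGWs, the cluster-subnet-wide SNAT (nat2) is replaced by
+ // one outport-matched SNAT per pod IP (perPodSNAT); otherwise nat2 SNATs the
+ // whole layer2 subnet on the gateway router.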
+ if config.Gateway.DisableSNATMultipleGWs { + expectedEntities = append(expectedEntities, newNATEntry(nat1, dummyMasqueradeIP().IP.String(), gwRouterJoinIPAddress().IP.String(), standardNonDefaultNetworkExtIDs(netInfo), "")) + expectedEntities = append(expectedEntities, newNATEntry(nat3, dummyMasqueradeIP().IP.String(), layer2SubnetGWAddr().IP.String(), standardNonDefaultNetworkExtIDs(netInfo), "")) + expectedEntities = append(expectedEntities, newNATEntry(perPodSNAT, dummyMasqueradeIP().IP.String(), dummyL2TestPodAdditionalNetworkIP(), nil, fmt.Sprintf("outport == %q", gwRouterToExtSwitchPortName))) + } else { + expectedEntities = append(expectedEntities, newNATEntry(nat1, dummyMasqueradeIP().IP.String(), gwRouterJoinIPAddress().IP.String(), standardNonDefaultNetworkExtIDs(netInfo), "")) + expectedEntities = append(expectedEntities, newNATEntry(nat2, dummyMasqueradeIP().IP.String(), layer2Subnet().String(), standardNonDefaultNetworkExtIDs(netInfo), fmt.Sprintf("outport == %q", gwRouterToExtSwitchPortName))) + expectedEntities = append(expectedEntities, newNATEntry(nat3, dummyMasqueradeIP().IP.String(), layer2SubnetGWAddr().IP.String(), standardNonDefaultNetworkExtIDs(netInfo), "")) } return expectedEntities } func expectedGWToNetworkSwitchRouterPort(name string, netInfo util.NetInfo, networks ...*net.IPNet) *nbdb.LogicalRouterPort { options := map[string]string{"gateway_mtu": fmt.Sprintf("%d", 1400)} - return expectedLogicalRouterPort(name, netInfo, options, networks...) + lrp := expectedLogicalRouterPort(name, netInfo, options, networks...) + + if config.IPv6Mode { + lrp.Ipv6RaConfigs = map[string]string{ + "address_mode": "dhcpv6_stateful", + "mtu": "1400", + "send_periodic": "true", + "max_interval": "900", + "min_interval": "300", + "router_preference": "LOW", + } + } + return lrp } func layer2Subnet() *net.IPNet { @@ -431,10 +655,10 @@ func ipv4DefaultRoute() *net.IPNet { func dummyLayer2SecondaryUserDefinedNetwork(subnets string) secondaryNetInfo { return secondaryNetInfo{ - netName: secondaryNetworkName, - nadName: namespacedName(ns, nadName), - topology: ovntypes.Layer2Topology, - subnets: subnets, + netName: secondaryNetworkName, + nadName: namespacedName(ns, nadName), + topology: ovntypes.Layer2Topology, + clustersubnets: subnets, } } @@ -444,8 +668,9 @@ func dummyLayer2PrimaryUserDefinedNetwork(subnets string) secondaryNetInfo { return secondaryNet } -func newSecondaryLayer2NetworkController(cnci *CommonNetworkControllerInfo, netInfo util.NetInfo, nodeName string) *SecondaryLayer2NetworkController { - layer2NetworkController := NewSecondaryLayer2NetworkController(cnci, netInfo) +func newSecondaryLayer2NetworkController(cnci *CommonNetworkControllerInfo, netInfo util.NetInfo, nodeName string, + nadController networkAttachDefController.NADController) *SecondaryLayer2NetworkController { + layer2NetworkController, _ := NewSecondaryLayer2NetworkController(cnci, netInfo, nadController) layer2NetworkController.gatewayManagers.Store( nodeName, newDummyGatewayManager(cnci.kube, cnci.nbClient, netInfo, cnci.watchFactory, nodeName), @@ -459,3 +684,164 @@ func nodeIP() *net.IPNet { Mask: net.CIDRMask(24, 32), } } + +func nodeCIDR() *net.IPNet { + return &net.IPNet{ + IP: net.ParseIP("192.168.126.0"), + Mask: net.CIDRMask(24, 32), + } +} + +func setupFakeOvnForLayer2Topology(fakeOvn *FakeOVN, initialDB libovsdbtest.TestSetup, netInfo secondaryNetInfo, testNode *v1.Node, podInfo testPod, pod *v1.Pod) error { + By(fmt.Sprintf("creating a network attachment definition for network: %s", 
netInfo.netName)) + nad, err := newNetworkAttachmentDefinition( + ns, + nadName, + *netInfo.netconf(), + ) + Expect(err).NotTo(HaveOccurred()) + By("setting up the OVN DB without any entities in it") + Expect(netInfo.setupOVNDependencies(&initialDB)).To(Succeed()) + + if netInfo.isPrimary { + networkConfig, err := util.NewNetInfo(netInfo.netconf()) + Expect(err).NotTo(HaveOccurred()) + + initialDB.NBData = append( + initialDB.NBData, + &nbdb.LogicalRouter{ + Name: fmt.Sprintf("GR_%s_%s", networkConfig.GetNetworkName(), nodeName), + ExternalIDs: standardNonDefaultNetworkExtIDs(networkConfig), + }, + newNetworkClusterPortGroup(networkConfig), + ) + } + + fakeOvn.startWithDBSetup( + initialDB, + &v1.NamespaceList{ + Items: []v1.Namespace{ + *newNamespace(ns), + }, + }, + &v1.NodeList{Items: []v1.Node{*testNode}}, + &v1.PodList{ + Items: []v1.Pod{ + *pod, + }, + }, + &nadapi.NetworkAttachmentDefinitionList{ + Items: []nadapi.NetworkAttachmentDefinition{*nad}, + }, + ) + podInfo.populateLogicalSwitchCache(fakeOvn) + + // on IC, the test itself spits out the pod with the + // annotations set, since in production it is the + // clustermanager that annotates the pod. + if !config.OVNKubernetesFeature.EnableInterconnect { + By("asserting the pod originally does *not* feature the OVN pod networks annotation") + // pod exists, networks annotations don't + pod, err := fakeOvn.fakeClient.KubeClient.CoreV1().Pods(podInfo.namespace).Get(context.Background(), podInfo.podName, metav1.GetOptions{}) + if err != nil { + return err + } + _, ok := pod.Annotations[util.OvnPodAnnotationName] + if ok { + return fmt.Errorf("unexpected pod annotation %q", util.OvnPodAnnotationName) + } + } + if err = fakeOvn.controller.nadController.Start(); err != nil { + return err + } + + if err = fakeOvn.controller.WatchNamespaces(); err != nil { + return err + } + if err = fakeOvn.controller.WatchPods(); err != nil { + return err + } + By("asserting the pod (once reconciled) *features* the OVN pod networks annotation") + secondaryNetController, doesControllerExist := fakeOvn.secondaryControllers[secondaryNetworkName] + if !doesControllerExist { + return fmt.Errorf("expected secondary network controller to exist") + } + + secondaryNetController.bnc.ovnClusterLRPToJoinIfAddrs = dummyJoinIPs() + podInfo.populateSecondaryNetworkLogicalSwitchCache(fakeOvn, secondaryNetController) + if err = secondaryNetController.bnc.WatchNodes(); err != nil { + return err + } + if err = secondaryNetController.bnc.WatchPods(); err != nil { + return err + } + + return nil +} + +func setupConfig(netInfo secondaryNetInfo, testConfig testConfiguration, gatewayMode config.GatewayMode) { + if testConfig.configToOverride != nil { + config.OVNKubernetesFeature = *testConfig.configToOverride + if testConfig.gatewayConfig != nil { + config.Gateway.DisableSNATMultipleGWs = testConfig.gatewayConfig.DisableSNATMultipleGWs + } + } + config.Gateway.Mode = gatewayMode + if knet.IsIPv6CIDRString(netInfo.clustersubnets) { + config.IPv6Mode = true + // tests don't support dualstack yet + config.IPv4Mode = false + } +} + +func notReadyMigrationInfo() *liveMigrationInfo { + const vmName = "my-vm" + return &liveMigrationInfo{ + vmName: vmName, + sourcePodInfo: liveMigrationPodInfo{ + podPhase: v1.PodRunning, + creationTimestamp: metav1.NewTime(time.Now().Add(-time.Hour)), + expectedLspEnabled: lspEnableNotSpecified, + }, + targetPodInfo: liveMigrationPodInfo{ + podPhase:
v1.PodRunning, + creationTimestamp: metav1.NewTime(time.Now()), + expectedLspEnabled: lspEnableExplicitlyFalse, + }, + } +} + +func readyMigrationInfo() *liveMigrationInfo { + const vmName = "my-vm" + return &liveMigrationInfo{ + vmName: vmName, + sourcePodInfo: liveMigrationPodInfo{ + podPhase: v1.PodRunning, + creationTimestamp: metav1.NewTime(time.Now().Add(-time.Hour)), + expectedLspEnabled: lspEnableExplicitlyFalse, + }, + targetPodInfo: liveMigrationPodInfo{ + podPhase: v1.PodRunning, + creationTimestamp: metav1.NewTime(time.Now()), + annotation: map[string]string{kubevirtv1.MigrationTargetReadyTimestamp: "some-timestamp"}, + expectedLspEnabled: lspEnableExplicitlyTrue, + }, + } +} + +func failedMigrationInfo() *liveMigrationInfo { + const vmName = "my-vm" + return &liveMigrationInfo{ + vmName: vmName, + sourcePodInfo: liveMigrationPodInfo{ + podPhase: v1.PodRunning, + creationTimestamp: metav1.NewTime(time.Now().Add(-time.Hour)), + expectedLspEnabled: lspEnableExplicitlyTrue, + }, + targetPodInfo: liveMigrationPodInfo{ + podPhase: v1.PodFailed, + creationTimestamp: metav1.NewTime(time.Now()), + }, + } +} diff --git a/go-controller/pkg/ovn/secondary_layer3_network_controller.go b/go-controller/pkg/ovn/secondary_layer3_network_controller.go index eba5cd2f1a..1c99522826 100644 --- a/go-controller/pkg/ovn/secondary_layer3_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer3_network_controller.go @@ -15,8 +15,11 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/udn" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + nad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" + svccontroller "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/services" lsm "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/logical_switch_manager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/topology" zoneic "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/zone_interconnect" @@ -156,8 +159,8 @@ func (h *secondaryLayer3NetworkControllerEventHandler) UpdateResource(oldObj, ne return fmt.Errorf("could not cast oldObj of type %T to *kapi.Node", oldObj) } newNodeIsLocalZoneNode := h.oc.isLocalZoneNode(newNode) - zoneClusterChanged := h.oc.nodeZoneClusterChanged(oldNode, newNode, newNodeIsLocalZoneNode) - nodeSubnetChanged := nodeSubnetChanged(oldNode, newNode) + zoneClusterChanged := h.oc.nodeZoneClusterChanged(oldNode, newNode, newNodeIsLocalZoneNode, h.oc.NetInfo.GetNetworkName()) + nodeSubnetChanged := nodeSubnetChanged(oldNode, newNode, h.oc.NetInfo.GetNetworkName()) if newNodeIsLocalZoneNode { var nodeSyncsParam *nodeSyncs if h.oc.isLocalZoneNode(oldNode) { @@ -165,6 +168,7 @@ func (h *secondaryLayer3NetworkControllerEventHandler) UpdateResource(oldObj, ne _, nodeSync := h.oc.addNodeFailed.Load(newNode.Name) _, failed := h.oc.nodeClusterRouterPortFailed.Load(newNode.Name) clusterRtrSync := failed || nodeChassisChanged(oldNode, newNode) || nodeSubnetChanged + _, failed = h.oc.mgmtPortFailed.Load(newNode.Name) syncMgmtPort := failed || macAddressChanged(oldNode, newNode, h.oc.GetNetworkName()) || nodeSubnetChanged _, syncZoneIC := h.oc.syncZoneICFailed.Load(newNode.Name) syncZoneIC = syncZoneIC || 
zoneClusterChanged @@ -246,6 +250,9 @@ func (h *secondaryLayer3NetworkControllerEventHandler) SyncFunc(objs []interface case factory.NamespaceType: syncFunc = h.oc.syncNamespaces + case factory.PolicyType: + syncFunc = h.oc.syncNetworkPolicies + case factory.MultiNetworkPolicyType: syncFunc = h.oc.syncMultiNetworkPolicies @@ -277,15 +284,34 @@ type SecondaryLayer3NetworkController struct { syncZoneICFailed sync.Map gatewaysFailed sync.Map + gatewayManagers sync.Map + gatewayTopologyFactory *topology.GatewayTopologyFactory + + // Cluster wide Load_Balancer_Group UUID. + // Includes all node switches and node gateway routers. + clusterLoadBalancerGroupUUID string + + // Cluster wide switch Load_Balancer_Group UUID. + // Includes all node switches. + switchLoadBalancerGroupUUID string + + // Cluster wide router Load_Balancer_Group UUID. + // Includes all node gateway routers. + routerLoadBalancerGroupUUID string + // Cluster-wide router default Control Plane Protection (COPP) UUID defaultCOPPUUID string - gatewayManagers sync.Map - gatewayTopologyFactory *topology.GatewayTopologyFactory + // Controller in charge of services + svcController *svccontroller.Controller + + // EgressIP controller utilized only to initialize a network with OVN policies to support EgressIP functionality. + eIPController *EgressIPController } // NewSecondaryLayer3NetworkController create a new OVN controller for the given secondary layer3 NAD -func NewSecondaryLayer3NetworkController(cnci *CommonNetworkControllerInfo, netInfo util.NetInfo) *SecondaryLayer3NetworkController { +func NewSecondaryLayer3NetworkController(cnci *CommonNetworkControllerInfo, netInfo util.NetInfo, nadController nad.NADController, + eIPController *EgressIPController, portCache *PortCache) (*SecondaryLayer3NetworkController, error) { stopChan := make(chan struct{}) ipv4Mode, ipv6Mode := netInfo.IPMode() @@ -296,6 +322,23 @@ func NewSecondaryLayer3NetworkController(cnci *CommonNetworkControllerInfo, netI addressSetFactory := addressset.NewOvnAddressSetFactory(cnci.nbClient, ipv4Mode, ipv6Mode) + var svcController *svccontroller.Controller + if util.IsNetworkSegmentationSupportEnabled() && netInfo.IsPrimaryNetwork() { + var err error + svcController, err = svccontroller.NewController( + cnci.client, cnci.nbClient, + cnci.watchFactory.ServiceCoreInformer(), + cnci.watchFactory.EndpointSliceCoreInformer(), + cnci.watchFactory.NodeCoreInformer(), + nadController, + cnci.recorder, + netInfo, + ) + if err != nil { + return nil, fmt.Errorf("unable to create new service controller for network=%s: %w", netInfo.GetNetworkName(), err) + } + } + oc := &SecondaryLayer3NetworkController{ BaseSecondaryNetworkController: BaseSecondaryNetworkController{ BaseNetworkController: BaseNetworkController{ @@ -303,7 +346,7 @@ func NewSecondaryLayer3NetworkController(cnci *CommonNetworkControllerInfo, netI controllerName: getNetworkControllerName(netInfo.GetNetworkName()), NetInfo: netInfo, lsManager: lsm.NewLogicalSwitchManager(), - logicalPortCache: newPortCache(stopChan), + logicalPortCache: portCache, namespaces: make(map[string]*namespaceInfo), namespacesMutex: sync.Mutex{}, addressSetFactory: addressSetFactory, @@ -315,6 +358,7 @@ func NewSecondaryLayer3NetworkController(cnci *CommonNetworkControllerInfo, netI localZoneNodes: &sync.Map{}, zoneICHandler: zoneICHandler, cancelableCtx: util.NewCancelableContext(), + nadController: nadController, }, }, mgmtPortFailed: sync.Map{}, @@ -324,6 +368,8 @@ func NewSecondaryLayer3NetworkController(cnci
*CommonNetworkControllerInfo, netI gatewaysFailed: sync.Map{}, gatewayTopologyFactory: topology.NewGatewayTopologyFactory(cnci.nbClient), gatewayManagers: sync.Map{}, + svcController: svcController, + eIPController: eIPController, } if oc.allocatesPodAnnotation() { @@ -335,23 +381,31 @@ func NewSecondaryLayer3NetworkController(cnci *CommonNetworkControllerInfo, netI oc.podAnnotationAllocator = podAnnotationAllocator } - // disable multicast support for secondary networks - // TBD: changes needs to be made to support multicast in secondary networks - oc.multicastSupport = false + // enable multicast support for UDN only for primaries + multicast enabled + // TBD: changes need to be made to support multicast beyond primary UDN + oc.multicastSupport = oc.IsPrimaryNetwork() && util.IsNetworkSegmentationSupportEnabled() && config.EnableMulticast oc.initRetryFramework() - return oc + return oc, nil } func (oc *SecondaryLayer3NetworkController) initRetryFramework() { oc.retryPods = oc.newRetryFramework(factory.PodType) oc.retryNodes = oc.newRetryFramework(factory.NodeType) + // When a user-defined network is enabled as a primary network for a namespace, + // then watch for namespace and network policy events. + if oc.IsPrimaryNetwork() { + oc.retryNamespaces = oc.newRetryFramework(factory.NamespaceType) + oc.retryNetworkPolicies = oc.newRetryFramework(factory.PolicyType) + } + // For secondary networks, we don't have to watch namespace events if - // multi-network policy support is not enabled. + // multi-network policy support is not enabled. We don't support + // multi-network policy for IPAM-less secondary networks either. if util.IsMultiNetworkPoliciesSupportEnabled() { oc.retryNamespaces = oc.newRetryFramework(factory.NamespaceType) - oc.retryNetworkPolicies = oc.newRetryFramework(factory.MultiNetworkPolicyType) + oc.retryMultiNetworkPolicies = oc.newRetryFramework(factory.MultiNetworkPolicyType) } } @@ -382,8 +436,11 @@ func (oc *SecondaryLayer3NetworkController) newRetryFramework( // Start starts the secondary layer3 controller, handles all events and creates all needed logical entities func (oc *SecondaryLayer3NetworkController) Start(ctx context.Context) error { klog.Infof("Start secondary %s network controller of network %s", oc.TopologyType(), oc.GetNetworkName()) - - if err := oc.Init(ctx); err != nil { + _, err := oc.getNetworkID() + if err != nil { + return fmt.Errorf("unable to set networkID on secondary L3 controller for network %s, err: %w", oc.GetNetworkName(), err) + } + if err = oc.Init(ctx); err != nil { return err } @@ -397,8 +454,11 @@ func (oc *SecondaryLayer3NetworkController) Stop() { oc.cancelableCtx.Cancel() oc.wg.Wait() - if oc.policyHandler != nil { - oc.watchFactory.RemoveMultiNetworkPolicyHandler(oc.policyHandler) + if oc.netPolicyHandler != nil { + oc.watchFactory.RemovePolicyHandler(oc.netPolicyHandler) + } + if oc.multiNetPolicyHandler != nil { + oc.watchFactory.RemoveMultiNetworkPolicyHandler(oc.multiNetPolicyHandler) } if oc.podHandler != nil { oc.watchFactory.RemovePodHandler(oc.podHandler) @@ -487,13 +547,34 @@ func (oc *SecondaryLayer3NetworkController) Run() error { return err } + if oc.svcController != nil { + startSvc := time.Now() + // Services should be started after nodes to prevent LB churn + err := oc.StartServiceController(oc.wg, true) + endSvc := time.Since(startSvc) + + metrics.MetricOVNKubeControllerSyncDuration.WithLabelValues("service_" + oc.GetNetworkName()).Set(endSvc.Seconds()) + if err != nil { + return err + } + } + if err :=
oc.WatchPods(); err != nil { return err } - // WatchMultiNetworkPolicy depends on WatchPods and WatchNamespaces - if err := oc.WatchMultiNetworkPolicy(); err != nil { - return err + if util.IsMultiNetworkPoliciesSupportEnabled() { + // WatchMultiNetworkPolicy depends on WatchPods and WatchNamespaces + if err := oc.WatchMultiNetworkPolicy(); err != nil { + return err + } + } + + if oc.IsPrimaryNetwork() { + // WatchNetworkPolicy depends on WatchPods and WatchNamespaces + if err := oc.WatchNetworkPolicy(); err != nil { + return err + } } klog.Infof("Completing all the Watchers for network %s took %v", oc.GetNetworkName(), time.Since(start)) @@ -531,11 +612,33 @@ func (oc *SecondaryLayer3NetworkController) Init(ctx context.Context) error { return fmt.Errorf("failed to create OVN cluster router for network %q: %v", oc.GetNetworkName(), err) } - // Only configure join switch and GR for user defined primary networks. + // Only configure join switch, GR, cluster port groups and multicast default policies for user defined primary networks. if util.IsNetworkSegmentationSupportEnabled() && oc.IsPrimaryNetwork() { if err := oc.gatewayTopologyFactory.NewJoinSwitch(clusterRouter, oc.NetInfo, oc.ovnClusterLRPToJoinIfAddrs); err != nil { return fmt.Errorf("failed to create join switch for network %q: %v", oc.GetNetworkName(), err) } + + if err := oc.setupClusterPortGroups(); err != nil { + return fmt.Errorf("failed to create cluster port groups for network %q: %w", oc.GetNetworkName(), err) + } + + if err := oc.syncDefaultMulticastPolicies(); err != nil { + return fmt.Errorf("failed to sync default multicast policies for network %q: %w", oc.GetNetworkName(), err) + } + } + + // FIXME: When https://github.com/ovn-org/libovsdb/issues/235 is fixed, + // use IsTableSupported(nbdb.LoadBalancerGroup). 
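+ // Until then, support is probed by listing the table directly below: a
+ // failing "ovn-nbctl --columns=_uuid list Load_Balancer_Group" is treated
+ // as the running OVN schema not supporting Load Balancer Groups.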
+ if _, _, err := util.RunOVNNbctl("--columns=_uuid", "list", "Load_Balancer_Group"); err != nil { + klog.Warningf("Load Balancer Group support enabled, however the version of OVN in use does not support Load Balancer Groups.") + } else { + clusterLBGroupUUID, switchLBGroupUUID, routerLBGroupUUID, err := initLoadBalancerGroups(oc.nbClient, oc.NetInfo) + if err != nil { + return err + } + oc.clusterLoadBalancerGroupUUID = clusterLBGroupUUID + oc.switchLoadBalancerGroupUUID = switchLBGroupUUID + oc.routerLoadBalancerGroupUUID = routerLBGroupUUID } return nil } @@ -586,7 +689,7 @@ func (oc *SecondaryLayer3NetworkController) addUpdateLocalNodeEvent(node *kapi.N errs = append(errs, err) oc.mgmtPortFailed.Store(node.Name, true) } else { - _, err = oc.syncNodeManagementPortRouteHostSubnets(node, oc.GetNetworkScopedSwitchName(node.Name), hostSubnets) + _, err = oc.syncNodeManagementPort(node, oc.GetNetworkScopedSwitchName(node.Name), oc.GetNetworkScopedClusterRouterName(), hostSubnets) if err != nil { errs = append(errs, err) oc.mgmtPortFailed.Store(node.Name, true) @@ -618,10 +721,10 @@ func (oc *SecondaryLayer3NetworkController) addUpdateLocalNodeEvent(node *kapi.N gwConfig.config, gwConfig.hostSubnets, gwConfig.hostAddrs, - gwConfig.hostSubnets, - gwConfig.gwLRPIPs, + gwConfig.clusterSubnets, + gwConfig.gwLRPJoinIPs, // the joinIP allocated to this node for this controller's network oc.SCTPSupport, - oc.ovnClusterLRPToJoinIfAddrs, + oc.ovnClusterLRPToJoinIfAddrs, // the .1 of this controller's global joinSubnet gwConfig.externalIPs, ); err != nil { errs = append(errs, fmt.Errorf( @@ -655,6 +758,15 @@ func (oc *SecondaryLayer3NetworkController) addUpdateLocalNodeEvent(node *kapi.N } } + if config.OVNKubernetesFeature.EnableEgressIP && util.IsNetworkSegmentationSupportEnabled() && oc.IsPrimaryNetwork() { + if err = oc.eIPController.ensureL3ClusterRouterPoliciesForNetwork(oc.NetInfo); err != nil { + errs = append(errs, fmt.Errorf("failed to add network %s to EgressIP controller: %v", oc.NetInfo.GetNetworkName(), err)) + } + if err = oc.eIPController.ensureL3SwitchPoliciesForNode(oc.NetInfo, node.Name); err != nil { + errs = append(errs, fmt.Errorf("failed to ensure EgressIP switch policies: %v", err)) + } + } + err = utilerrors.Join(errs...) if err != nil { oc.recordNodeErrorEvent(node, err) @@ -683,6 +795,36 @@ func (oc *SecondaryLayer3NetworkController) addUpdateRemoteNodeEvent(node *kapi. return err } +// addUDNNodeSubnetEgressSNAT adds the SNAT on each node's ovn-cluster-router in L3 networks +// snat eth.dst == d6:cf:fd:2c:a6:44 169.254.0.12 10.128.0.0/24 +// snat eth.dst == d6:cf:fd:2c:a6:44 169.254.0.12 2010:100:200::/64 +// these SNATs are required for pod2Egress traffic in LGW mode and pod2SameNode traffic in SGW mode to function properly on UDNs +// SNAT Breakdown: +// match = "eth.dst == d6:cf:fd:2c:a6:44"; the MAC here is the mpX interface MAC address for this UDN +// logicalIP = "10.128.0.0/24"; which is the podsubnet for this node in L3 UDN +// externalIP = "169.254.0.12"; which is the masqueradeIP for this L3 UDN +// so all in all we want to conditionally SNAT all packets that are coming from pods hosted on this node, +// which are leaving via UDN's mpX interface to the UDN's masqueradeIP.
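+//
+// A rough sketch of the NAT row this yields per IP family (values are
+// illustrative only; the real rows are assembled by buildUDNEgressSNAT):
+//
+//	nbdb.NAT{
+//		Type:       "snat",
+//		Match:      "eth.dst == d6:cf:fd:2c:a6:44", // mpX MAC of this UDN
+//		LogicalIP:  "10.128.0.0/24",                // this node's pod subnet
+//		ExternalIP: "169.254.0.12",                 // this UDN's masquerade IP
+//	}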
+func (oc *SecondaryLayer3NetworkController) addUDNNodeSubnetEgressSNAT(localPodSubnets []*net.IPNet, node *kapi.Node) error { + outputPort := types.RouterToSwitchPrefix + oc.GetNetworkScopedName(node.Name) + nats, err := oc.buildUDNEgressSNAT(localPodSubnets, outputPort, node) + if err != nil { + return fmt.Errorf("failed to build UDN masquerade SNATs for network %q on node %q, err: %w", + oc.GetNetworkName(), node.Name, err) + } + if len(nats) == 0 { + return nil // nothing to do + } + router := &nbdb.LogicalRouter{ + Name: oc.GetNetworkScopedClusterRouterName(), + } + if err := libovsdbops.CreateOrUpdateNATs(oc.nbClient, router, nats...); err != nil { + return fmt.Errorf("failed to update SNAT for node subnet on router: %q for network %q, error: %w", + oc.GetNetworkScopedClusterRouterName(), oc.GetNetworkName(), err) + } + return nil +} + func (oc *SecondaryLayer3NetworkController) addNode(node *kapi.Node) ([]*net.IPNet, error) { // Node subnet for the secondary layer3 network is allocated by cluster manager. // Make sure that the node is allocated with the subnet before proceeding @@ -692,10 +834,15 @@ func (oc *SecondaryLayer3NetworkController) addNode(node *kapi.Node) ([]*net.IPN return nil, fmt.Errorf("subnet annotation in the node %q for the layer3 secondary network %s is missing : %w", node.Name, oc.GetNetworkName(), err) } - err = oc.createNodeLogicalSwitch(node.Name, hostSubnets, "", "") + err = oc.createNodeLogicalSwitch(node.Name, hostSubnets, oc.clusterLoadBalancerGroupUUID, oc.switchLoadBalancerGroupUUID) if err != nil { return nil, err } + if util.IsNetworkSegmentationSupportEnabled() && oc.IsPrimaryNetwork() { + if err := oc.addUDNNodeSubnetEgressSNAT(hostSubnets, node); err != nil { + return nil, err + } + } return hostSubnets, nil } @@ -794,11 +941,12 @@ func (oc *SecondaryLayer3NetworkController) gatherJoinSwitchIPs() error { } type SecondaryL3GatewayConfig struct { - config *util.L3GatewayConfig - hostSubnets []*net.IPNet - gwLRPIPs []*net.IPNet - hostAddrs []string - externalIPs []net.IP + config *util.L3GatewayConfig + hostSubnets []*net.IPNet + clusterSubnets []*net.IPNet + gwLRPJoinIPs []*net.IPNet + hostAddrs []string + externalIPs []net.IP } func (oc *SecondaryLayer3NetworkController) nodeGatewayConfig(node *kapi.Node) (*SecondaryL3GatewayConfig, error) { @@ -813,38 +961,20 @@ func (oc *SecondaryLayer3NetworkController) nodeGatewayConfig(node *kapi.Node) ( return nil, fmt.Errorf("failed to get networkID for network %q: %v", networkName, err) } - var ( - masqIPs []*net.IPNet - v4MasqIP *net.IPNet - v6MasqIP *net.IPNet - ) - - if config.IPv4Mode { - v4MasqIPs, err := udn.AllocateV4MasqueradeIPs(networkID) - if err != nil { - return nil, fmt.Errorf("failed to get v4 masquerade IP, network %s (%d): %v", networkName, networkID, err) - } - v4MasqIP = v4MasqIPs.GatewayRouter - masqIPs = append(masqIPs, v4MasqIP) - } - if config.IPv6Mode { - v6MasqIPs, err := udn.AllocateV6MasqueradeIPs(networkID) - if err != nil { - return nil, fmt.Errorf("failed to get v6 masquerade IP, network %s (%d): %v", networkName, networkID, err) - } - v6MasqIP = v6MasqIPs.GatewayRouter - masqIPs = append(masqIPs, v6MasqIP) + masqIPs, err := udn.GetUDNGatewayMasqueradeIPs(networkID) + if err != nil { + return nil, fmt.Errorf("failed to get masquerade IPs, network %s (%d): %v", networkName, networkID, err) } l3GatewayConfig.IPAddresses = append(l3GatewayConfig.IPAddresses, masqIPs...) // Always SNAT to the per network masquerade IP. 
var externalIPs []net.IP - if config.IPv4Mode && v4MasqIP != nil { - externalIPs = append(externalIPs, v4MasqIP.IP) - } - if config.IPv6Mode && v6MasqIP != nil { - externalIPs = append(externalIPs, v6MasqIP.IP) + for _, masqIP := range masqIPs { + if masqIP == nil { + continue + } + externalIPs = append(externalIPs, masqIP.IP) } var hostAddrs []string @@ -852,13 +982,19 @@ func (oc *SecondaryLayer3NetworkController) nodeGatewayConfig(node *kapi.Node) ( hostAddrs = append(hostAddrs, externalIP.String()) } - // Use the host subnets present in the network attachment definition. - hostSubnets := make([]*net.IPNet, 0, len(oc.Subnets())) + // Use the cluster subnets present in the network attachment definition. + clusterSubnets := make([]*net.IPNet, 0, len(oc.Subnets())) for _, subnet := range oc.Subnets() { - hostSubnets = append(hostSubnets, subnet.CIDR) + clusterSubnets = append(clusterSubnets, subnet.CIDR) } - gwLRPIPs, err := util.ParseNodeGatewayRouterJoinAddrs(node, oc.GetNetworkName()) + // Fetch the host subnets present in the node annotation for this network + hostSubnets, err := util.ParseNodeHostSubnetAnnotation(node, oc.GetNetworkName()) + if err != nil { + return nil, fmt.Errorf("failed to get node %q subnet annotation for network %q: %v", node.Name, oc.GetNetworkName(), err) + } + + gwLRPJoinIPs, err := util.ParseNodeGatewayRouterJoinAddrs(node, oc.GetNetworkName()) if err != nil { return nil, fmt.Errorf("failed extracting node %q GW router join subnet IP for layer3 network %q: %w", node.Name, networkName, err) } @@ -867,11 +1003,12 @@ func (oc *SecondaryLayer3NetworkController) nodeGatewayConfig(node *kapi.Node) ( l3GatewayConfig.InterfaceID = oc.GetNetworkScopedExtPortName(l3GatewayConfig.BridgeID, node.Name) return &SecondaryL3GatewayConfig{ - config: l3GatewayConfig, - hostSubnets: hostSubnets, - gwLRPIPs: gwLRPIPs, - hostAddrs: hostAddrs, - externalIPs: externalIPs, + config: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterSubnets, + gwLRPJoinIPs: gwLRPJoinIPs, + hostAddrs: hostAddrs, + externalIPs: externalIPs, }, nil } @@ -898,9 +1035,22 @@ func (oc *SecondaryLayer3NetworkController) newGatewayManager(nodeName string) * oc.nbClient, oc.NetInfo, oc.watchFactory, + oc.gatewayOptions()..., ) } +func (oc *SecondaryLayer3NetworkController) gatewayOptions() []GatewayOption { + var opts []GatewayOption + if oc.clusterLoadBalancerGroupUUID != "" { + opts = append(opts, WithLoadBalancerGroups( + oc.routerLoadBalancerGroupUUID, + oc.clusterLoadBalancerGroupUUID, + oc.switchLoadBalancerGroupUUID, + )) + } + return opts +} + func (oc *SecondaryLayer3NetworkController) gatewayManagerForNode(nodeName string) *GatewayManager { obj, isFound := oc.gatewayManagers.Load(nodeName) if !isFound { @@ -918,3 +1068,18 @@ func (oc *SecondaryLayer3NetworkController) gatewayManagerForNode(nodeName strin return gwManager } } + +func (oc *SecondaryLayer3NetworkController) StartServiceController(wg *sync.WaitGroup, runRepair bool) error { + wg.Add(1) + go func() { + defer wg.Done() + useLBGroups := oc.clusterLoadBalancerGroupUUID != "" + // use 5 workers like most of the kubernetes controllers in the kubernetes controller-manager + // do not use LB templates for UDNs - OVN bug https://issues.redhat.com/browse/FDP-988 + err := oc.svcController.Run(5, oc.stopChan, runRepair, useLBGroups, false) + if err != nil { + klog.Errorf("Error running OVN Kubernetes Services controller for network %s: %v", oc.GetNetworkName(), err) + } + }() + return nil +} diff --git 
a/go-controller/pkg/ovn/secondary_layer3_network_controller_test.go b/go-controller/pkg/ovn/secondary_layer3_network_controller_test.go index 266bd4149a..efbd660c4b 100644 --- a/go-controller/pkg/ovn/secondary_layer3_network_controller_test.go +++ b/go-controller/pkg/ovn/secondary_layer3_network_controller_test.go @@ -7,8 +7,8 @@ import ( "strings" "time" - . "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" cnitypes "github.com/containernetworking/cni/pkg/types" @@ -16,35 +16,45 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knet "k8s.io/utils/net" + "k8s.io/utils/ptr" nadapi "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + networkAttachDefController "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" + fakenad "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/nad" ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + networkingv1 "k8s.io/api/networking/v1" ) type secondaryNetInfo struct { - netName string - nadName string - subnets string - topology string - isPrimary bool + netName string + nadName string + clustersubnets string + hostsubnets string // not used in layer2 tests + topology string + isPrimary bool } const ( - dummyMACAddr = "02:03:04:05:06:07" nadName = "blue-net" ns = "namespace1" secondaryNetworkName = "isolatednet" + denyPolicyName = "deny-all-policy" + denyPG = "deny-port-group" ) type testConfiguration struct { configToOverride *config.OVNKubernetesFeatureConfig + gatewayConfig *config.GatewayConfig expectationOptions []option } @@ -79,12 +89,21 @@ var _ = Describe("OVN Multi-Homed pod operations", func() { fakeOvn.shutdown() }) - table.DescribeTable( + DescribeTable( "reconciles a new", - func(netInfo secondaryNetInfo, testConfig testConfiguration) { + func(netInfo secondaryNetInfo, testConfig testConfiguration, gwMode config.GatewayMode) { podInfo := dummyTestPod(ns, netInfo) if testConfig.configToOverride != nil { config.OVNKubernetesFeature = *testConfig.configToOverride + if testConfig.gatewayConfig != nil { + config.Gateway.DisableSNATMultipleGWs = testConfig.gatewayConfig.DisableSNATMultipleGWs + } + } + config.Gateway.Mode = gwMode + if knet.IsIPv6CIDRString(netInfo.clustersubnets) { + config.IPv6Mode = true + // tests don't support dual-stack yet + config.IPv4Mode = false } app.Action = func(ctx *cli.Context) error { nad, err := newNetworkAttachmentDefinition( @@ -94,11 +113,9 @@ var _ = Describe("OVN Multi-Homed pod operations", func() { ) Expect(err).NotTo(HaveOccurred()) Expect(netInfo.setupOVNDependencies(&initialDB)).To(Succeed()) - if netInfo.isPrimary { networkConfig, err := util.NewNetInfo(netInfo.netconf()) Expect(err).NotTo(HaveOccurred()) - initialDB.NBData = append( initialDB.NBData, &nbdb.LogicalSwitch{ @@ -113,11 +130,14 @@ var _ = Describe("OVN Multi-Homed pod
operations", func() { Name: fmt.Sprintf("rtos-%s_%s", netInfo.netName, nodeName), }, ) + initialDB.NBData = append(initialDB.NBData, getHairpinningACLsV4AndPortGroup()...) + initialDB.NBData = append(initialDB.NBData, getHairpinningACLsV4AndPortGroupForNetwork(networkConfig, nil)...) } const nodeIPv4CIDR = "192.168.126.202/24" testNode, err := newNodeWithSecondaryNets(nodeName, nodeIPv4CIDR, netInfo) Expect(err).NotTo(HaveOccurred()) + networkPolicy := getMatchLabelsNetworkPolicy(denyPolicyName, ns, "", "", false, false) fakeOvn.startWithDBSetup( initialDB, &v1.NamespaceList{ @@ -130,12 +150,15 @@ var _ = Describe("OVN Multi-Homed pod operations", func() { }, &v1.PodList{ Items: []v1.Pod{ - *newMultiHomedPod(podInfo.namespace, podInfo.podName, podInfo.nodeName, podInfo.podIP, netInfo), + *newMultiHomedPod(podInfo, netInfo), }, }, &nadapi.NetworkAttachmentDefinitionList{ Items: []nadapi.NetworkAttachmentDefinition{*nad}, }, + &networkingv1.NetworkPolicyList{ + Items: []networkingv1.NetworkPolicy{*networkPolicy}, + }, ) podInfo.populateLogicalSwitchCache(fakeOvn) @@ -145,8 +168,13 @@ var _ = Describe("OVN Multi-Homed pod operations", func() { _, ok := pod.Annotations[util.OvnPodAnnotationName] Expect(ok).To(BeFalse()) + Expect(fakeOvn.controller.nadController.Start()).NotTo(HaveOccurred()) + Expect(fakeOvn.controller.WatchNamespaces()).NotTo(HaveOccurred()) Expect(fakeOvn.controller.WatchPods()).NotTo(HaveOccurred()) + if netInfo.isPrimary { + Expect(fakeOvn.controller.WatchNetworkPolicy()).NotTo(HaveOccurred()) + } secondaryNetController, ok := fakeOvn.secondaryControllers[secondaryNetworkName] Expect(ok).To(BeTrue()) @@ -155,6 +183,13 @@ var _ = Describe("OVN Multi-Homed pod operations", func() { Expect(secondaryNetController.bnc.WatchNodes()).To(Succeed()) Expect(secondaryNetController.bnc.WatchPods()).To(Succeed()) + if netInfo.isPrimary { + Expect(secondaryNetController.bnc.WatchNetworkPolicy()).To(Succeed()) + ninfo, err := fakeOvn.nadController.GetActiveNetworkForNamespace(ns) + Expect(err).NotTo(HaveOccurred()) + Expect(ninfo.GetNetworkName()).To(Equal(netInfo.netName)) + } + // check that after start networks annotations and nbdb will be updated Eventually(func() string { return getPodAnnotations(fakeOvn.fakeClient.KubeClient, podInfo.namespace, podInfo.podName) @@ -168,6 +203,32 @@ var _ = Describe("OVN Multi-Homed pod operations", func() { Expect(err).NotTo(HaveOccurred()) Expect(gwConfig.NextHops).NotTo(BeEmpty()) expectationOptions = append(expectationOptions, withGatewayConfig(gwConfig)) + if testConfig.configToOverride != nil && testConfig.configToOverride.EnableEgressFirewall { + defaultNetExpectations = append(defaultNetExpectations, + buildNamespacedPortGroup(podInfo.namespace, DefaultNetworkControllerName)) + secNetPG := buildNamespacedPortGroup(podInfo.namespace, secondaryNetController.bnc.controllerName) + portName := util.GetSecondaryNetworkLogicalPortName(podInfo.namespace, podInfo.podName, netInfo.nadName) + "-UUID" + secNetPG.Ports = []string{portName} + defaultNetExpectations = append(defaultNetExpectations, secNetPG) + } + networkConfig, err := util.NewNetInfo(netInfo.netconf()) + Expect(err).NotTo(HaveOccurred()) + // Add NetPol hairpin ACLs and PGs for the validation. + mgmtPortName := managementPortName(secondaryNetController.bnc.GetNetworkScopedName(nodeName)) + mgmtPortUUID := mgmtPortName + "-UUID" + defaultNetExpectations = append(defaultNetExpectations, getHairpinningACLsV4AndPortGroup()...) 
+ defaultNetExpectations = append(defaultNetExpectations, getHairpinningACLsV4AndPortGroupForNetwork(networkConfig, + []string{mgmtPortUUID})...) + // Add Netpol deny policy ACLs and PGs for the validation. + podLPortName := util.GetSecondaryNetworkLogicalPortName(podInfo.namespace, podInfo.podName, netInfo.nadName) + "-UUID" + dataParams := newNetpolDataParams(networkPolicy).withLocalPortUUIDs(podLPortName).withNetInfo(networkConfig) + defaultDenyExpectedData := getDefaultDenyData(dataParams) + pgDbIDs := getNetworkPolicyPortGroupDbIDs(ns, secondaryNetController.bnc.controllerName, denyPolicyName) + ingressPG := libovsdbutil.BuildPortGroup(pgDbIDs, nil, nil) + ingressPG.UUID = denyPG + ingressPG.Ports = []string{podLPortName} + defaultNetExpectations = append(defaultNetExpectations, ingressPG) + defaultNetExpectations = append(defaultNetExpectations, defaultDenyExpectedData...) } Eventually(fakeOvn.nbClient).Should( libovsdbtest.HaveData( @@ -177,37 +238,65 @@ var _ = Describe("OVN Multi-Homed pod operations", func() { fakeOvn, []testPod{podInfo}, expectationOptions..., - ).expectedLogicalSwitchesAndPorts()...))) + ).expectedLogicalSwitchesAndPorts(netInfo.isPrimary)...))) return nil } Expect(app.Run([]string{app.Name})).To(Succeed()) }, - table.Entry("pod on a user defined secondary network", - dummySecondaryUserDefinedNetwork("192.168.0.0/16"), + Entry("pod on a user defined secondary network", + dummySecondaryLayer3UserDefinedNetwork("192.168.0.0/16", "192.168.1.0/24"), nonICClusterTestConfiguration(), + config.GatewayModeShared, ), - table.Entry("pod on a user defined primary network", - dummyPrimaryUserDefinedNetwork("192.168.0.0/16"), + Entry("pod on a user defined primary network", + dummyPrimaryLayer3UserDefinedNetwork("192.168.0.0/16", "192.168.1.0/24"), nonICClusterTestConfiguration(), + config.GatewayModeShared, ), - table.Entry("pod on a user defined secondary network on an interconnect cluster", - dummySecondaryUserDefinedNetwork("192.168.0.0/16"), + Entry("pod on a user defined secondary network on an IC cluster", + dummySecondaryLayer3UserDefinedNetwork("192.168.0.0/16", "192.168.1.0/24"), icClusterTestConfiguration(), + config.GatewayModeShared, ), - table.Entry("pod on a user defined primary network on an interconnect cluster", - dummyPrimaryUserDefinedNetwork("192.168.0.0/16"), + Entry("pod on a user defined primary network on an IC cluster", + dummyPrimaryLayer3UserDefinedNetwork("192.168.0.0/16", "192.168.1.0/24"), icClusterTestConfiguration(), + config.GatewayModeShared, + ), + Entry("pod on a user defined primary network on an IC cluster; LGW", + dummyPrimaryLayer3UserDefinedNetwork("192.168.0.0/16", "192.168.1.0/24"), + icClusterTestConfiguration(), + config.GatewayModeLocal, + ), + Entry("pod on a user defined primary network on an IC cluster with per-pod SNATs enabled", + dummyPrimaryLayer3UserDefinedNetwork("192.168.0.0/16", "192.168.1.0/24"), + icClusterTestConfiguration(func(testConfig *testConfiguration) { + testConfig.gatewayConfig = &config.GatewayConfig{DisableSNATMultipleGWs: true} + }), + config.GatewayModeShared, + ), + Entry("pod on a user defined primary network on an IC cluster with EgressFirewall enabled", + dummyPrimaryLayer3UserDefinedNetwork("192.168.0.0/16", "192.168.1.0/24"), + icClusterTestConfiguration(func(config *testConfiguration) { + config.configToOverride.EnableEgressFirewall = true + }), + config.GatewayModeShared, ), ) - table.DescribeTable( + DescribeTable( "the gateway is properly cleaned up", func(netInfo secondaryNetInfo, 
testConfig testConfiguration) { + config.OVNKubernetesFeature.EnableMultiNetwork = true + config.OVNKubernetesFeature.EnableNetworkSegmentation = true podInfo := dummyTestPod(ns, netInfo) if testConfig.configToOverride != nil { config.OVNKubernetesFeature = *testConfig.configToOverride + if testConfig.gatewayConfig != nil { + config.Gateway.DisableSNATMultipleGWs = testConfig.gatewayConfig.DisableSNATMultipleGWs + } } app.Action = func(ctx *cli.Context) error { netConf := netInfo.netconf() @@ -221,11 +310,19 @@ var _ = Describe("OVN Multi-Homed pod operations", func() { ) Expect(err).NotTo(HaveOccurred()) + networkConfig.SetNADs(util.GetNADName(nad.Namespace, nad.Name)) + nadController := &fakenad.FakeNADController{ + PrimaryNetworks: make(map[string]util.NetInfo), + } + nadController.PrimaryNetworks[ns] = networkConfig + const nodeIPv4CIDR = "192.168.126.202/24" testNode, err := newNodeWithSecondaryNets(nodeName, nodeIPv4CIDR, netInfo) Expect(err).NotTo(HaveOccurred()) + nbZone := &nbdb.NBGlobal{Name: ovntypes.OvnDefaultZone, UUID: ovntypes.OvnDefaultZone} defaultNetExpectations := emptyDefaultClusterNetworkNodeSwitch(podInfo.nodeName) + defaultNetExpectations = append(defaultNetExpectations, nbZone) gwConfig, err := util.ParseNodeL3GatewayAnnotation(testNode) Expect(err).NotTo(HaveOccurred()) Expect(gwConfig.NextHops).NotTo(BeEmpty()) @@ -235,11 +332,19 @@ var _ = Describe("OVN Multi-Homed pod operations", func() { Expect(err).NotTo(HaveOccurred()) initialDB.NBData = append( initialDB.NBData, - expectedGWEntities(podInfo.nodeName, networkConfig, *gwConfig)...) + expectedGWEntities(podInfo.nodeName, netInfo.hostsubnets, networkConfig, *gwConfig)...) initialDB.NBData = append( initialDB.NBData, - expectedLayer3EgressEntities(networkConfig, *gwConfig)...) + expectedLayer3EgressEntities(networkConfig, *gwConfig, testing.MustParseIPNet(netInfo.hostsubnets))...) 
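Context for the fixture rename (`subnets` becoming `clustersubnets`/`hostsubnets`): the expectation builders are now parameterized on the per-node host subnet instead of deriving everything from the NAD-level subnet. Spelled out, the addressing these tests assume looks roughly like the sketch below; the `.2` management-port role is an assumption based on the usual ovn-kubernetes layout, the remaining values appear verbatim in this file:

```go
// Illustrative layer3 UDN addressing used by these fixtures.
const (
	clusterSubnet = "192.168.0.0/16" // NAD-level subnet (clustersubnets)
	nodeSubnet    = "192.168.1.0/24" // per-node slice (hostsubnets), allocated by cluster manager
	gatewayIP     = "192.168.1.1"    // .1 of the node subnet: the rtos- LRP address
	mgmtIP        = "192.168.1.2"    // .2 of the node subnet: management port (assumed)
	podIP         = "192.168.1.3"    // first workload address on this node
)
```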
+ initialDB.NBData = append(initialDB.NBData, + newNetworkClusterPortGroup(networkConfig), + ) + if testConfig.configToOverride != nil && testConfig.configToOverride.EnableEgressFirewall { + defaultNetExpectations = append(defaultNetExpectations, + buildNamespacedPortGroup(podInfo.namespace, DefaultNetworkControllerName)) + } } + initialDB.NBData = append(initialDB.NBData, nbZone) fakeOvn.startWithDBSetup( initialDB, @@ -253,7 +358,7 @@ var _ = Describe("OVN Multi-Homed pod operations", func() { }, &v1.PodList{ Items: []v1.Pod{ - *newMultiHomedPod(podInfo.namespace, podInfo.podName, podInfo.nodeName, podInfo.podIP, netInfo), + *newMultiHomedPod(podInfo, netInfo), }, }, &nadapi.NetworkAttachmentDefinitionList{ @@ -271,6 +376,8 @@ var _ = Describe("OVN Multi-Homed pod operations", func() { _, ok := pod.Annotations[util.OvnPodAnnotationName] Expect(ok).To(BeFalse()) + Expect(fakeOvn.controller.nadController.Start()).NotTo(HaveOccurred()) + Expect(fakeOvn.controller.WatchNamespaces()).To(Succeed()) Expect(fakeOvn.controller.WatchPods()).To(Succeed()) secondaryNetController, ok := fakeOvn.secondaryControllers[secondaryNetworkName] @@ -281,6 +388,10 @@ var _ = Describe("OVN Multi-Homed pod operations", func() { Expect(secondaryNetController.bnc.WatchNodes()).To(Succeed()) Expect(secondaryNetController.bnc.WatchPods()).To(Succeed()) + if netInfo.isPrimary { + Expect(secondaryNetController.bnc.WatchNetworkPolicy()).To(Succeed()) + } + Expect(fakeOvn.fakeClient.KubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{})).To(Succeed()) Expect(fakeOvn.fakeClient.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(nad.Namespace).Delete(context.Background(), nad.Name, metav1.DeleteOptions{})).To(Succeed()) @@ -290,6 +401,8 @@ var _ = Describe("OVN Multi-Homed pod operations", func() { &secondaryNetController.bnc.CommonNetworkControllerInfo, networkConfig, nodeName, + nadController, + nil, NewPortCache(ctx.Done()), ).Cleanup()).To(Succeed()) Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(defaultNetExpectations)) @@ -297,14 +410,26 @@ var _ = Describe("OVN Multi-Homed pod operations", func() { } Expect(app.Run([]string{app.Name})).To(Succeed()) }, - table.Entry("pod on a user defined primary network", - dummyPrimaryUserDefinedNetwork("192.168.0.0/16"), + Entry("pod on a user defined primary network", + dummyPrimaryLayer3UserDefinedNetwork("192.168.0.0/16", "192.168.1.0/24"), nonICClusterTestConfiguration(), ), - table.Entry("pod on a user defined primary network on an interconnect cluster", - dummyPrimaryUserDefinedNetwork("192.168.0.0/16"), + Entry("pod on a user defined primary network on an IC cluster", + dummyPrimaryLayer3UserDefinedNetwork("192.168.0.0/16", "192.168.1.0/24"), icClusterTestConfiguration(), ), + Entry("pod on a user defined primary network on an IC cluster with per-pod SNATs enabled", + dummyPrimaryLayer3UserDefinedNetwork("192.168.0.0/16", "192.168.1.0/24"), + icClusterTestConfiguration(func(testConfig *testConfiguration) { + testConfig.gatewayConfig = &config.GatewayConfig{DisableSNATMultipleGWs: true} + }), + ), + Entry("pod on a user defined primary network on an IC cluster with EgressFirewall enabled", + dummyPrimaryLayer3UserDefinedNetwork("192.168.0.0/16", "192.168.1.0/24"), + icClusterTestConfiguration(func(config *testConfiguration) { + config.configToOverride.EnableEgressFirewall = true + }), + ), ) }) @@ -330,25 +455,25 @@ func newPodWithPrimaryUDN( pod.addNetwork( primaryUDNConfig.netName, 
primaryUDNConfig.nadName, - primaryUDNConfig.subnets, + primaryUDNConfig.hostsubnets, "", nodeGWIP, - "192.168.0.3/16", - "0a:58:c0:a8:00:03", + "192.168.1.3/24", + "0a:58:c0:a8:01:03", "primary", 0, []util.PodRoute{ { Dest: testing.MustParseIPNet("192.168.0.0/16"), - NextHop: testing.MustParseIP("192.168.0.1"), + NextHop: testing.MustParseIP("192.168.1.1"), }, { Dest: testing.MustParseIPNet("172.16.1.0/24"), - NextHop: testing.MustParseIP("192.168.0.1"), + NextHop: testing.MustParseIP("192.168.1.1"), }, { Dest: testing.MustParseIPNet("100.65.0.0/16"), - NextHop: testing.MustParseIP("192.168.0.1"), + NextHop: testing.MustParseIP("192.168.1.1"), }, }, ) @@ -357,30 +482,42 @@ func newPodWithPrimaryUDN( func namespacedName(ns, name string) string { return fmt.Sprintf("%s/%s", ns, name) } +func (sni *secondaryNetInfo) getNetworkRole() string { + return util.GetUserDefinedNetworkRole(sni.isPrimary) +} + +func getNetworkRole(netInfo util.NetInfo) string { + return util.GetUserDefinedNetworkRole(netInfo.IsPrimaryNetwork()) +} + func (sni *secondaryNetInfo) setupOVNDependencies(dbData *libovsdbtest.TestSetup) error { netInfo, err := util.NewNetInfo(sni.netconf()) if err != nil { return err } + externalIDs := map[string]string{ + ovntypes.NetworkExternalID: sni.netName, + ovntypes.NetworkRoleExternalID: sni.getNetworkRole(), + } switch sni.topology { case ovntypes.Layer2Topology: dbData.NBData = append(dbData.NBData, &nbdb.LogicalSwitch{ Name: netInfo.GetNetworkScopedName(ovntypes.OVNLayer2Switch), UUID: netInfo.GetNetworkScopedName(ovntypes.OVNLayer2Switch) + "_UUID", - ExternalIDs: map[string]string{ovntypes.NetworkExternalID: sni.netName}, + ExternalIDs: externalIDs, }) case ovntypes.Layer3Topology: dbData.NBData = append(dbData.NBData, &nbdb.LogicalSwitch{ Name: netInfo.GetNetworkScopedName(nodeName), UUID: netInfo.GetNetworkScopedName(nodeName) + "_UUID", - ExternalIDs: map[string]string{ovntypes.NetworkExternalID: sni.netName}, + ExternalIDs: externalIDs, }) case ovntypes.LocalnetTopology: dbData.NBData = append(dbData.NBData, &nbdb.LogicalSwitch{ Name: netInfo.GetNetworkScopedName(ovntypes.OVNLocalnetSwitch), UUID: netInfo.GetNetworkScopedName(ovntypes.OVNLocalnetSwitch) + "_UUID", - ExternalIDs: map[string]string{ovntypes.NetworkExternalID: sni.netName}, + ExternalIDs: externalIDs, }) default: return fmt.Errorf("missing topology in the network configuration: %v", sni) @@ -402,7 +539,7 @@ func (sni *secondaryNetInfo) netconf() *ovncnitypes.NetConf { }, Topology: sni.topology, NADName: sni.nadName, - Subnets: sni.subnets, + Subnets: sni.clustersubnets, Role: role, } } @@ -414,7 +551,7 @@ func dummyTestPod(nsName string, info secondaryNetInfo) testPod { nodeName, nodeSubnet, "10.128.1.2", - "192.168.0.1", + "192.168.1.1", "myPod", "10.128.1.3", "0a:58:0a:80:01:03", @@ -426,40 +563,47 @@ func dummyTestPod(nsName string, info secondaryNetInfo) testPod { pod.addNetwork( info.netName, info.nadName, - info.subnets, + info.hostsubnets, "", "", - "192.168.0.3/16", - "0a:58:c0:a8:00:03", + "192.168.1.3/24", + "0a:58:c0:a8:01:03", "secondary", 0, []util.PodRoute{ { - Dest: testing.MustParseIPNet("192.168.0.0/16"), - NextHop: testing.MustParseIP("192.168.0.1"), + Dest: testing.MustParseIPNet(info.clustersubnets), + NextHop: testing.MustParseIP("192.168.1.1"), }, }, ) return pod } -func dummySecondaryUserDefinedNetwork(subnets string) secondaryNetInfo { +func dummyTestPodAdditionalNetworkIP() string { + secNetInfo := dummyPrimaryLayer2UserDefinedNetwork("192.168.0.0/16") + return dummyTestPod(ns, 
secNetInfo).getNetworkPortInfo(secNetInfo.netName, secNetInfo.nadName).podIP +} + +func dummySecondaryLayer3UserDefinedNetwork(clustersubnets, hostsubnets string) secondaryNetInfo { return secondaryNetInfo{ - netName: secondaryNetworkName, - nadName: namespacedName(ns, nadName), - topology: ovntypes.Layer3Topology, - subnets: subnets, + netName: secondaryNetworkName, + nadName: namespacedName(ns, nadName), + topology: ovntypes.Layer3Topology, + clustersubnets: clustersubnets, + hostsubnets: hostsubnets, } } -func dummyPrimaryUserDefinedNetwork(subnets string) secondaryNetInfo { - secondaryNet := dummySecondaryUserDefinedNetwork(subnets) +func dummyPrimaryLayer3UserDefinedNetwork(clustersubnets, hostsubnets string) secondaryNetInfo { + secondaryNet := dummySecondaryLayer3UserDefinedNetwork(clustersubnets, hostsubnets) secondaryNet.isPrimary = true return secondaryNet } +// This util is returning a network-name/hostSubnet for the node's node-subnets annotation func (sni *secondaryNetInfo) String() string { - return fmt.Sprintf("%q: %q", sni.netName, sni.subnets) + return fmt.Sprintf("%q: %q", sni.netName, sni.hostsubnets) } func newNodeWithSecondaryNets(nodeName string, nodeIPv4CIDR string, netInfos ...secondaryNetInfo) (*v1.Node, error) { @@ -479,15 +623,15 @@ func newNodeWithSecondaryNets(nodeName string, nodeIPv4CIDR string, netInfos ... ObjectMeta: metav1.ObjectMeta{ Name: nodeName, Annotations: map[string]string{ - "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", nodeIPv4CIDR, ""), - "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\", %s}", v4Node1Subnet, strings.Join(nodeSubnetInfo, ",")), - util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", nodeIPv4CIDR), - "k8s.ovn.org/zone-name": "global", - "k8s.ovn.org/l3-gateway-config": fmt.Sprintf("{\"default\":{\"mode\":\"shared\",\"bridge-id\":\"breth0\",\"interface-id\":\"breth0_ovn-worker\",\"mac-address\":%q,\"ip-addresses\":[%[2]q],\"ip-address\":%[2]q,\"next-hops\":[%[3]q],\"next-hop\":%[3]q,\"node-port-enable\":\"true\",\"vlan-id\":\"0\"}}", util.IPAddrToHWAddr(nodeIP), nodeCIDR, nextHopIP), - util.OvnNodeChassisID: "abdcef", - "k8s.ovn.org/network-ids": "{\"default\":\"0\",\"isolatednet\":\"2\"}", - util.OvnNodeManagementPortMacAddresses: fmt.Sprintf("{\"isolatednet\":%q}", dummyMACAddr), - util.OVNNodeGRLRPAddrs: fmt.Sprintf("{\"isolatednet\":{\"ipv4\":%q}}", gwRouterIPAddress()), + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", nodeIPv4CIDR, ""), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\", %s}", v4Node1Subnet, strings.Join(nodeSubnetInfo, ",")), + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", nodeIPv4CIDR), + "k8s.ovn.org/zone-name": "global", + "k8s.ovn.org/l3-gateway-config": fmt.Sprintf("{\"default\":{\"mode\":\"shared\",\"bridge-id\":\"breth0\",\"interface-id\":\"breth0_ovn-worker\",\"mac-address\":%q,\"ip-addresses\":[%[2]q],\"ip-address\":%[2]q,\"next-hops\":[%[3]q],\"next-hop\":%[3]q,\"node-port-enable\":\"true\",\"vlan-id\":\"0\"}}", util.IPAddrToHWAddr(nodeIP), nodeCIDR, nextHopIP), + util.OvnNodeChassisID: "abdcef", + "k8s.ovn.org/network-ids": "{\"default\":\"0\",\"isolatednet\":\"2\"}", + util.OVNNodeGRLRPAddrs: fmt.Sprintf("{\"isolatednet\":{\"ipv4\":%q}}", gwRouterJoinIPAddress()), + "k8s.ovn.org/udn-layer2-node-gateway-router-lrp-tunnel-ids": "{\"isolatednet\":\"25\"}", }, Labels: map[string]string{ "k8s.ovn.org/egress-assignable": "", @@ -505,10 +649,10 @@ func newNodeWithSecondaryNets(nodeName string, nodeIPv4CIDR 
string, netInfos ... } func dummyJoinIPs() []*net.IPNet { - return []*net.IPNet{dummyJoinIP()} + return []*net.IPNet{dummyMasqueradeIP()} } -func dummyJoinIP() *net.IPNet { +func dummyMasqueradeIP() *net.IPNet { return &net.IPNet{ IP: net.ParseIP("169.254.169.13"), Mask: net.CIDRMask(24, 32), @@ -526,16 +670,16 @@ func emptyDefaultClusterNetworkNodeSwitch(nodeName string) []libovsdbtest.TestDa return []libovsdbtest.TestData{&nbdb.LogicalSwitch{UUID: switchUUID, Name: nodeName}} } -func expectedGWEntities(nodeName string, netInfo util.NetInfo, gwConfig util.L3GatewayConfig) []libovsdbtest.TestData { +func expectedGWEntities(nodeName, nodeSubnet string, netInfo util.NetInfo, gwConfig util.L3GatewayConfig) []libovsdbtest.TestData { gwRouterName := fmt.Sprintf("GR_%s_%s", netInfo.GetNetworkName(), nodeName) expectedEntities := append( expectedGWRouterPlusNATAndStaticRoutes(nodeName, gwRouterName, netInfo, gwConfig), - expectedGRToJoinSwitchLRP(gwRouterName, gwRouterIPAddress(), netInfo), + expectedGRToJoinSwitchLRP(gwRouterName, gwRouterJoinIPAddress(), netInfo), expectedGRToExternalSwitchLRP(gwRouterName, netInfo, nodePhysicalIPAddress(), udnGWSNATAddress()), expectedGatewayChassis(nodeName, netInfo, gwConfig), - expectedStaticMACBinding(gwRouterName, nextHopMasqueradeIP()), ) + expectedEntities = append(expectedEntities, expectedStaticMACBindings(gwRouterName, staticMACBindingIPs())...) expectedEntities = append(expectedEntities, expectedExternalSwitchAndLSPs(netInfo, gwConfig, nodeName)...) expectedEntities = append(expectedEntities, expectedJoinSwitchAndLSPs(netInfo, nodeName)...) return expectedEntities @@ -552,6 +696,7 @@ func expectedGWRouterPlusNATAndStaticRoutes( const ( nat1 = "abc-UUID" nat2 = "cba-UUID" + perPodSNAT = "pod-snat-UUID" staticRoute1 = "srA-UUID" staticRoute2 = "srB-UUID" staticRoute3 = "srC-UUID" @@ -560,36 +705,51 @@ func expectedGWRouterPlusNATAndStaticRoutes( staticRouteOutputPort := ovntypes.GWRouterToExtSwitchPrefix + netInfo.GetNetworkScopedGWRouterName(nodeName) nextHopIP := gwConfig.NextHops[0].String() - ipv4Subnet := networkSubnet(netInfo) nextHopMasqIP := nextHopMasqueradeIP().String() masqSubnet := config.Gateway.V4MasqueradeSubnet - return []libovsdbtest.TestData{ + var nat []string + if config.Gateway.DisableSNATMultipleGWs { + nat = append(nat, nat1, perPodSNAT) + } else { + nat = append(nat, nat1, nat2) + } + expectedEntities := []libovsdbtest.TestData{ &nbdb.LogicalRouter{ Name: gwRouterName, UUID: gwRouterName + "-UUID", ExternalIDs: gwRouterExternalIDs(netInfo, gwConfig), Options: gwRouterOptions(gwConfig), Ports: []string{gwRouterToJoinLRPUUID, gwRouterToExtLRPUUID}, - Nat: []string{nat1, nat2}, + Nat: nat, StaticRoutes: []string{staticRoute1, staticRoute2, staticRoute3}, }, - newNATEntry(nat1, dummyJoinIP().IP.String(), gwRouterIPAddress().IP.String(), standardNonDefaultNetworkExtIDs(netInfo)), - newNATEntry(nat2, dummyJoinIP().IP.String(), networkSubnet(netInfo), standardNonDefaultNetworkExtIDs(netInfo)), - expectedGRStaticRoute(staticRoute1, ipv4Subnet, dummyJoinIP().IP.String(), nil, nil, netInfo), + expectedGRStaticRoute(staticRoute1, netInfo.Subnets()[0].CIDR.String(), dummyMasqueradeIP().IP.String(), nil, nil, netInfo), expectedGRStaticRoute(staticRoute2, ipv4DefaultRoute, nextHopIP, nil, &staticRouteOutputPort, netInfo), expectedGRStaticRoute(staticRoute3, masqSubnet, nextHopMasqIP, nil, &staticRouteOutputPort, netInfo), } + if config.Gateway.DisableSNATMultipleGWs { + expectedEntities = append(expectedEntities, newNATEntry(nat1, 
dummyMasqueradeIP().IP.String(), gwRouterJoinIPAddress().IP.String(), standardNonDefaultNetworkExtIDs(netInfo), "")) + expectedEntities = append(expectedEntities, newNATEntry(perPodSNAT, dummyMasqueradeIP().IP.String(), dummyTestPodAdditionalNetworkIP(), nil, "")) + } else { + expectedEntities = append(expectedEntities, newNATEntry(nat1, dummyMasqueradeIP().IP.String(), gwRouterJoinIPAddress().IP.String(), standardNonDefaultNetworkExtIDs(netInfo), "")) + expectedEntities = append(expectedEntities, newNATEntry(nat2, dummyMasqueradeIP().IP.String(), netInfo.Subnets()[0].CIDR.String(), standardNonDefaultNetworkExtIDs(netInfo), "")) + } + return expectedEntities } -func expectedStaticMACBinding(gwRouterName string, ip net.IP) *nbdb.StaticMACBinding { +func expectedStaticMACBindings(gwRouterName string, ips []net.IP) []libovsdbtest.TestData { lrpName := fmt.Sprintf("%s%s", ovntypes.GWRouterToExtSwitchPrefix, gwRouterName) - return &nbdb.StaticMACBinding{ - UUID: lrpName + "static-mac-binding-UUID", - IP: ip.String(), - LogicalPort: lrpName, - MAC: util.IPAddrToHWAddr(nextHopMasqueradeIP()).String(), - OverrideDynamicMAC: true, + var bindings []libovsdbtest.TestData + for _, ip := range ips { + bindings = append(bindings, &nbdb.StaticMACBinding{ + UUID: fmt.Sprintf("%sstatic-mac-binding-UUID(%s)", lrpName, ip.String()), + IP: ip.String(), + LogicalPort: lrpName, + MAC: util.IPAddrToHWAddr(ip).String(), + OverrideDynamicMAC: true, + }) } + return bindings } func expectedGatewayChassis(nodeName string, netInfo util.NetInfo, gwConfig util.L3GatewayConfig) *nbdb.GatewayChassis { @@ -625,28 +785,38 @@ func expectedLogicalRouterPort(lrpName string, netInfo util.NetInfo, options map MAC: mac, Options: options, ExternalIDs: map[string]string{ - "k8s.ovn.org/topology": netInfo.TopologyType(), - "k8s.ovn.org/network": netInfo.GetNetworkName(), + ovntypes.TopologyExternalID: netInfo.TopologyType(), + ovntypes.NetworkExternalID: netInfo.GetNetworkName(), }, } } -func expectedLayer3EgressEntities(netInfo util.NetInfo, gwConfig util.L3GatewayConfig) []libovsdbtest.TestData { +func expectedLayer3EgressEntities(netInfo util.NetInfo, gwConfig util.L3GatewayConfig, nodeSubnet *net.IPNet) []libovsdbtest.TestData { const ( routerPolicyUUID1 = "lrpol1-UUID" routerPolicyUUID2 = "lrpol2-UUID" staticRouteUUID1 = "sr1-UUID" staticRouteUUID2 = "sr2-UUID" + staticRouteUUID3 = "sr3-UUID" + masqSNATUUID1 = "masq-snat1-UUID" ) - joinIPAddr := dummyJoinIP().IP.String() + masqIPAddr := dummyMasqueradeIP().IP.String() clusterRouterName := fmt.Sprintf("%s_ovn_cluster_router", netInfo.GetNetworkName()) rtosLRPName := fmt.Sprintf("%s%s", ovntypes.RouterToSwitchPrefix, netInfo.GetNetworkScopedName(nodeName)) rtosLRPUUID := rtosLRPName + "-UUID" nodeIP := gwConfig.IPAddresses[0].IP.String() - networkIPv4Subnet := networkSubnet(netInfo) - subnet := netInfo.Subnets()[0] // egress requires subnets. 
So far, these helpers do not work for dual-stack + masqSNAT := newNATEntry(masqSNATUUID1, "169.254.169.14", nodeSubnet.String(), standardNonDefaultNetworkExtIDs(netInfo), "") + masqSNAT.Match = getMasqueradeManagementIPSNATMatch(util.IPAddrToHWAddr(managementPortIP(nodeSubnet)).String()) + masqSNAT.LogicalPort = ptr.To(fmt.Sprintf("rtos-%s_%s", netInfo.GetNetworkName(), nodeName)) + if !config.OVNKubernetesFeature.EnableInterconnect { + masqSNAT.GatewayPort = ptr.To(fmt.Sprintf("rtos-%s_%s", netInfo.GetNetworkName(), nodeName) + "-UUID") + } gatewayChassisUUID := fmt.Sprintf("%s-%s-UUID", rtosLRPName, gwConfig.ChassisID) + lrsrNextHop := gwRouterJoinIPAddress().IP.String() + if config.Gateway.Mode == config.GatewayModeLocal { + lrsrNextHop = managementPortIP(nodeSubnet).String() + } expectedEntities := []libovsdbtest.TestData{ &nbdb.LogicalRouter{ Name: clusterRouterName, @@ -655,12 +825,14 @@ func expectedLayer3EgressEntities(netInfo util.NetInfo, gwConfig util.L3GatewayC StaticRoutes: []string{staticRouteUUID1, staticRouteUUID2}, Policies: []string{routerPolicyUUID1, routerPolicyUUID2}, ExternalIDs: standardNonDefaultNetworkExtIDs(netInfo), + Nat: []string{masqSNATUUID1}, }, - &nbdb.LogicalRouterPort{UUID: rtosLRPUUID, Name: rtosLRPName, Networks: []string{"192.168.0.1/16"}, MAC: "0a:58:c0:a8:00:01", GatewayChassis: []string{gatewayChassisUUID}}, - expectedGRStaticRoute(staticRouteUUID1, networkIPv4Subnet, gwRouterIPAddress().IP.String(), &nbdb.LogicalRouterStaticRoutePolicySrcIP, nil, netInfo), - expectedGRStaticRoute(staticRouteUUID2, gwRouterIPAddress().IP.String(), gwRouterIPAddress().IP.String(), nil, nil, netInfo), - expectedLogicalRouterPolicy(routerPolicyUUID1, netInfo, nodeName, nodeIP, managementPortIP(subnet.CIDR).String()), - expectedLogicalRouterPolicy(routerPolicyUUID2, netInfo, nodeName, joinIPAddr, managementPortIP(subnet.CIDR).String()), + &nbdb.LogicalRouterPort{UUID: rtosLRPUUID, Name: rtosLRPName, Networks: []string{"192.168.1.1/24"}, MAC: "0a:58:c0:a8:01:01", GatewayChassis: []string{gatewayChassisUUID}}, + expectedGRStaticRoute(staticRouteUUID1, nodeSubnet.String(), lrsrNextHop, &nbdb.LogicalRouterStaticRoutePolicySrcIP, nil, netInfo), + expectedGRStaticRoute(staticRouteUUID2, gwRouterJoinIPAddress().IP.String(), gwRouterJoinIPAddress().IP.String(), nil, nil, netInfo), + expectedLogicalRouterPolicy(routerPolicyUUID1, netInfo, nodeName, nodeIP, managementPortIP(nodeSubnet).String()), + expectedLogicalRouterPolicy(routerPolicyUUID2, netInfo, nodeName, masqIPAddr, managementPortIP(nodeSubnet).String()), + masqSNAT, } return expectedEntities } @@ -670,13 +842,14 @@ func expectedLogicalRouterPolicy(routerPolicyUUID1 string, netInfo util.NetInfo, priority = 1004 rerouteAction = "reroute" ) - networkScopedNodeName := netInfo.GetNetworkScopedName(nodeName) - lrpName := fmt.Sprintf("%s%s", ovntypes.RouterToSwitchPrefix, networkScopedNodeName) + networkScopedSwitchName := netInfo.GetNetworkScopedSwitchName(nodeName) + lrpName := fmt.Sprintf("%s%s", ovntypes.RouterToSwitchPrefix, networkScopedSwitchName) + return &nbdb.LogicalRouterPolicy{ UUID: routerPolicyUUID1, Action: rerouteAction, ExternalIDs: standardNonDefaultNetworkExtIDs(netInfo), - Match: fmt.Sprintf("inport == %q && ip4.dst == %s /* %s */", lrpName, destIP, networkScopedNodeName), + Match: fmt.Sprintf("inport == %q && ip4.dst == %s /* %s */", lrpName, destIP, networkScopedSwitchName), Nexthops: []string{nextHop}, Priority: priority, } @@ -690,8 +863,8 @@ func expectedGRStaticRoute(uuid, ipPrefix, nextHop string, 
policy *nbdb.LogicalR Nexthop: nextHop, Policy: policy, ExternalIDs: map[string]string{ - "k8s.ovn.org/network": netInfo.GetNetworkName(), - "k8s.ovn.org/topology": netInfo.TopologyType(), + ovntypes.NetworkExternalID: "isolatednet", + ovntypes.TopologyExternalID: netInfo.TopologyType(), }, } } @@ -730,11 +903,19 @@ func udnGWSNATAddress() *net.IPNet { } } -func newNATEntry(uuid string, externalIP string, logicalIP string, extIDs map[string]string) *nbdb.NAT { +func newMasqueradeManagementNATEntry(uuid string, externalIP string, logicalIP string, netInfo util.NetInfo) *nbdb.NAT { + masqSNAT := newNATEntry(uuid, "169.254.169.14", layer2Subnet().String(), standardNonDefaultNetworkExtIDs(netInfo), + getMasqueradeManagementIPSNATMatch(util.IPAddrToHWAddr(managementPortIP(layer2Subnet())).String())) + masqSNAT.LogicalPort = ptr.To(fmt.Sprintf("rtoj-GR_%s_%s", netInfo.GetNetworkName(), nodeName)) + return masqSNAT +} + +func newNATEntry(uuid string, externalIP string, logicalIP string, extIDs map[string]string, match string) *nbdb.NAT { return &nbdb.NAT{ UUID: uuid, ExternalIP: externalIP, LogicalIP: logicalIP, + Match: match, Type: "snat", Options: map[string]string{"stateless": "false"}, ExternalIDs: extIDs, @@ -751,7 +932,7 @@ func expectedExternalSwitchAndLSPs(netInfo util.NetInfo, gwConfig util.L3Gateway &nbdb.LogicalSwitch{ UUID: "ext-UUID", Name: netInfo.GetNetworkScopedExtSwitchName(nodeName), - ExternalIDs: standardNonDefaultNetworkExtIDs(netInfo), + ExternalIDs: standardNonDefaultNetworkExtIDsForLogicalSwitch(netInfo), Ports: []string{port1UUID, port2UUID}, }, &nbdb.LogicalSwitchPort{ @@ -807,17 +988,17 @@ func nextHopMasqueradeIP() net.IP { return net.ParseIP("169.254.169.4") } -func gwRouterIPAddress() *net.IPNet { +func staticMACBindingIPs() []net.IP { + return []net.IP{net.ParseIP("169.254.169.4"), net.ParseIP("169.254.169.2")} +} + +func gwRouterJoinIPAddress() *net.IPNet { return &net.IPNet{ IP: net.ParseIP("100.65.0.4"), Mask: net.CIDRMask(16, 32), } } -func networkSubnet(netInfo util.NetInfo) string { - return strings.TrimSuffix(subnetsAsString(netInfo.Subnets())[0], "/24") -} - func gwRouterOptions(gwConfig util.L3GatewayConfig) map[string]string { return map[string]string{ "lb_force_snat_ip": "router_ip", @@ -830,16 +1011,38 @@ func gwRouterOptions(gwConfig util.L3GatewayConfig) map[string]string { func standardNonDefaultNetworkExtIDs(netInfo util.NetInfo) map[string]string { return map[string]string{ - "k8s.ovn.org/topology": netInfo.TopologyType(), - "k8s.ovn.org/network": netInfo.GetNetworkName(), + ovntypes.TopologyExternalID: netInfo.TopologyType(), + ovntypes.NetworkExternalID: netInfo.GetNetworkName(), } } -func newSecondaryLayer3NetworkController(cnci *CommonNetworkControllerInfo, netInfo util.NetInfo, nodeName string) *SecondaryLayer3NetworkController { - layer3NetworkController := NewSecondaryLayer3NetworkController(cnci, netInfo) +func standardNonDefaultNetworkExtIDsForLogicalSwitch(netInfo util.NetInfo) map[string]string { + externalIDs := standardNonDefaultNetworkExtIDs(netInfo) + externalIDs[ovntypes.NetworkRoleExternalID] = getNetworkRole(netInfo) + return externalIDs +} + +func newSecondaryLayer3NetworkController(cnci *CommonNetworkControllerInfo, netInfo util.NetInfo, nodeName string, + nadController networkAttachDefController.NADController, eIPController *EgressIPController, portCache *PortCache) *SecondaryLayer3NetworkController { + layer3NetworkController, err := NewSecondaryLayer3NetworkController(cnci, netInfo, nadController, eIPController, portCache) + 
Expect(err).NotTo(HaveOccurred()) layer3NetworkController.gatewayManagers.Store( nodeName, newDummyGatewayManager(cnci.kube, cnci.nbClient, netInfo, cnci.watchFactory, nodeName), ) return layer3NetworkController } + +func buildNamespacedPortGroup(namespace, controller string) *nbdb.PortGroup { + pgIDs := getNamespacePortGroupDbIDs(namespace, controller) + pg := libovsdbutil.BuildPortGroup(pgIDs, nil, nil) + pg.UUID = pg.Name + "-UUID" + return pg +} + +func getNetworkPolicyPortGroupDbIDs(namespace, controllerName, name string) *libovsdbops.DbObjectIDs { + return libovsdbops.NewDbObjectIDs(libovsdbops.PortGroupNetworkPolicy, controllerName, + map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: libovsdbops.BuildNamespaceNameKey(namespace, name), + }) +} diff --git a/go-controller/pkg/ovn/secondary_localnet_network_controller.go b/go-controller/pkg/ovn/secondary_localnet_network_controller.go index 12901c1d49..4071a317cb 100644 --- a/go-controller/pkg/ovn/secondary_localnet_network_controller.go +++ b/go-controller/pkg/ovn/secondary_localnet_network_controller.go @@ -8,11 +8,13 @@ import ( "time" mnpapi "github.com/k8snetworkplumbingwg/multi-networkpolicy/pkg/apis/k8s.cni.cncf.io/v1beta1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/pod" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + networkAttachDefController "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" lsm "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/logical_switch_manager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/persistentips" @@ -183,7 +185,8 @@ type SecondaryLocalnetNetworkController struct { } // NewSecondaryLocalnetNetworkController create a new OVN controller for the given secondary localnet NAD -func NewSecondaryLocalnetNetworkController(cnci *CommonNetworkControllerInfo, netInfo util.NetInfo) *SecondaryLocalnetNetworkController { +func NewSecondaryLocalnetNetworkController(cnci *CommonNetworkControllerInfo, netInfo util.NetInfo, + nadController networkAttachDefController.NADController) *SecondaryLocalnetNetworkController { stopChan := make(chan struct{}) @@ -197,7 +200,7 @@ func NewSecondaryLocalnetNetworkController(cnci *CommonNetworkControllerInfo, ne controllerName: getNetworkControllerName(netInfo.GetNetworkName()), NetInfo: netInfo, lsManager: lsm.NewL2SwitchManager(), - logicalPortCache: newPortCache(stopChan), + logicalPortCache: NewPortCache(stopChan), namespaces: make(map[string]*namespaceInfo), namespacesMutex: sync.Mutex{}, addressSetFactory: addressSetFactory, @@ -208,6 +211,7 @@ func NewSecondaryLocalnetNetworkController(cnci *CommonNetworkControllerInfo, ne wg: &sync.WaitGroup{}, cancelableCtx: util.NewCancelableContext(), localZoneNodes: &sync.Map{}, + nadController: nadController, }, }, }, @@ -268,7 +272,7 @@ func (oc *SecondaryLocalnetNetworkController) Cleanup() error { func (oc *SecondaryLocalnetNetworkController) Init() error { switchName := oc.GetNetworkScopedSwitchName(types.OVNLocalnetSwitch) - logicalSwitch, err := oc.initializeLogicalSwitch(switchName, oc.Subnets(), oc.ExcludeSubnets()) + logicalSwitch, err := oc.initializeLogicalSwitch(switchName, oc.Subnets(), oc.ExcludeSubnets(), "", "") if err != nil { return err } 
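Note how the localnet controller passes empty load-balancer-group UUIDs to `initializeLogicalSwitch`, unlike the layer3 controller above. The next hunk also swaps the hard-coded `network_name` option for `localnetPortNetworkNameOptions()`; putting the two together, the localnet port that `Init()` assembles ends up roughly like this sketch (the `Tag` assignment is assumed from the surrounding VLAN handling, of which this diff only shows the start):

```go
// Sketch of the localnet LSP assembled in Init(); not a verbatim excerpt.
logicalSwitchPort := nbdb.LogicalSwitchPort{
	Name:      oc.GetNetworkScopedName(types.OVNLocalnetPort),
	Addresses: []string{"unknown"},
	Type:      "localnet",
	// network_name prefers the physical network mapping when one is set,
	// falling back to the OVN network name (see localnetPortNetworkNameOptions below).
	Options: oc.localnetPortNetworkNameOptions(),
}
if vlanID := int(oc.Vlan()); vlanID != 0 {
	logicalSwitchPort.Tag = &vlanID // assumption: Tag is *int on the generated model
}
```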
@@ -280,9 +284,7 @@ func (oc *SecondaryLocalnetNetworkController) Init() error { Name: oc.GetNetworkScopedName(types.OVNLocalnetPort), Addresses: []string{"unknown"}, Type: "localnet", - Options: map[string]string{ - "network_name": oc.GetNetworkName(), - }, + Options: oc.localnetPortNetworkNameOptions(), } intVlanID := int(oc.Vlan()) if intVlanID != 0 { @@ -315,7 +317,7 @@ func (oc *SecondaryLocalnetNetworkController) initRetryFramework() { // multi-network policy for IPAM-less secondary networks either. if util.IsMultiNetworkPoliciesSupportEnabled() { oc.retryNamespaces = oc.newRetryFramework(factory.NamespaceType) - oc.retryNetworkPolicies = oc.newRetryFramework(factory.MultiNetworkPolicyType) + oc.retryMultiNetworkPolicies = oc.newRetryFramework(factory.MultiNetworkPolicyType) } } @@ -342,3 +344,13 @@ func (oc *SecondaryLocalnetNetworkController) newRetryFramework( resourceHandler, ) } + +func (oc *SecondaryLocalnetNetworkController) localnetPortNetworkNameOptions() map[string]string { + localnetLSPOptions := map[string]string{ + "network_name": oc.GetNetworkName(), + } + if oc.PhysicalNetworkName() != "" { + localnetLSPOptions["network_name"] = oc.PhysicalNetworkName() + } + return localnetLSPOptions +} diff --git a/go-controller/pkg/ovn/topology/layer3factory_suite_test.go b/go-controller/pkg/ovn/topology/layer3factory_suite_test.go index 518823a469..b71ff910a8 100644 --- a/go-controller/pkg/ovn/topology/layer3factory_suite_test.go +++ b/go-controller/pkg/ovn/topology/layer3factory_suite_test.go @@ -3,7 +3,7 @@ package topology import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/ovn/topology/topologyfactory_test.go b/go-controller/pkg/ovn/topology/topologyfactory_test.go index 5956f2d5df..d8f8245111 100644 --- a/go-controller/pkg/ovn/topology/topologyfactory_test.go +++ b/go-controller/pkg/ovn/topology/topologyfactory_test.go @@ -3,7 +3,7 @@ package topology import ( "net" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" cnitypes "github.com/containernetworking/cni/pkg/types" @@ -11,6 +11,7 @@ import ( libovsdbclient "github.com/ovn-org/libovsdb/client" ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" @@ -33,6 +34,9 @@ var _ = Describe("Topology factory", func() { When("the original OVN DBs are empty", func() { BeforeEach(func() { + // required so that NewNetInfo can properly determine the IP families the cluster supports + config.IPv4Mode = true + config.IPv6Mode = true initialNBDB := []libovsdbtest.TestData{} initialSBDB := []libovsdbtest.TestData{} dbSetup := libovsdbtest.TestSetup{ @@ -160,8 +164,8 @@ var _ = Describe("Topology factory", func() { "0a:58:c0:a8:c8:0a", nil, map[string]string{ - "k8s.ovn.org/network": "angrytenant", - "k8s.ovn.org/topology": "layer3", + ovntypes.NetworkExternalID: "angrytenant", + ovntypes.TopologyExternalID: "layer3", }, ips(gwRoutersIPs...)..., ), diff --git a/go-controller/pkg/ovn/udn_isolation.go b/go-controller/pkg/ovn/udn_isolation.go index 6fcd2b9f91..9a129ffe4d 100644 --- a/go-controller/pkg/ovn/udn_isolation.go +++ b/go-controller/pkg/ovn/udn_isolation.go @@ -4,16 +4,19 @@ import ( "errors" "fmt" "net" + "strings" "k8s.io/klog/v2" utilnet "k8s.io/utils/net" libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors" ) const ( @@ -22,6 +25,8 @@ const ( AllowHostARPACL = "AllowHostARPSecondary" AllowHostSecondaryACL = "AllowHostSecondary" DenySecondaryACL = "DenySecondary" + // OpenPortACLPrefix is used to build per-pod ACLs, pod name should be added to the prefix to build a unique name + OpenPortACLPrefix = "OpenPort-" ) // setupUDNACLs should be called after the node's management port was configured @@ -108,7 +113,7 @@ func (oc *DefaultNetworkController) setupUDNACLs(mgmtPortIPs []net.IP) error { match = libovsdbutil.GetACLMatch(pgName, match, libovsdbutil.ACLIngress) ingressAllowACL := libovsdbutil.BuildACL(ingressAllowIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportIngress) - ops, err := libovsdbops.CreateOrUpdateACLsOps(oc.nbClient, nil, egressDenyACL, egressARPACL, ingressARPACL, ingressDenyACL, ingressAllowACL) + ops, err := libovsdbops.CreateOrUpdateACLsOps(oc.nbClient, nil, oc.GetSamplingConfig(), egressDenyACL, egressARPACL, ingressARPACL, ingressDenyACL, ingressAllowACL) if err != nil { return fmt.Errorf("failed to create or update UDN ACLs: %v", err) } @@ -136,3 +141,98 @@ func (oc *DefaultNetworkController) getUDNACLDbIDs(name string, aclDir libovsdbu libovsdbops.PolicyDirectionKey: string(aclDir), }) } + +func getPortsMatches(podAnnotations map[string]string, lspName string) (string, string, error) { + if lspName == "" { + return "", "", nil + } + ports, err := util.UnmarshalUDNOpenPortsAnnotation(podAnnotations) + if err != nil { + 
return "", "", err + } + if len(ports) == 0 { + return "", "", nil + } + // the protocol match is only used for ingress rules, so use a dst match + portMatches := []string{} + for _, portDef := range ports { + if portDef.Protocol == "icmp" { + // from the ovn docs: + // "icmp expands to icmp4 || icmp6" + portMatches = append(portMatches, "icmp") + } else { + portMatches = append(portMatches, fmt.Sprintf("%s.dst == %d", portDef.Protocol, *portDef.Port)) + } + } + protoMatch := strings.Join(portMatches, " || ") + // allow ingress for ARP or ND and open ports + // allow egress for ARP or ND + ingressMatch := fmt.Sprintf(`outport == "%s" && (arp || nd || (%s))`, lspName, protoMatch) + egressMatch := fmt.Sprintf(`inport == "%s" && (arp || nd)`, lspName) + + return ingressMatch, egressMatch, nil +} + +// setUDNPodOpenPorts should be called after the pod's lsp is created to add ACLs that allow ingress on required ports. +// When lspName="", the ACLs are removed. If the annotation can't be parsed, the ACLs are also deleted. +func (oc *DefaultNetworkController) setUDNPodOpenPorts(podNamespacedName string, podAnnotations map[string]string, lspName string) error { + ops, parseErr, err := oc.setUDNPodOpenPortsOps(podNamespacedName, podAnnotations, lspName, nil) + if err != nil { + return errors.Join(parseErr, err) + } + _, err = libovsdbops.TransactAndCheck(oc.nbClient, ops) + if err != nil { + return utilerrors.Join(parseErr, fmt.Errorf("failed to transact open ports UDN ACLs: %v", err)) + } + return parseErr +} + +// setUDNPodOpenPortsOps returns the operations to add or remove ACLs that allow ingress on required ports. +// The first returned error is a parse error, the second is a DB ops error. +func (oc *DefaultNetworkController) setUDNPodOpenPortsOps(podNamespacedName string, podAnnotations map[string]string, lspName string, + ops []libovsdb.Operation) ([]libovsdb.Operation, error, error) { + udnPGName := libovsdbutil.GetPortGroupName(oc.getSecondaryPodsPortGroupDbIDs()) + + ingressMatch, egressMatch, parseErr := getPortsMatches(podAnnotations, lspName) + // don't return on parseErr, as we need to clean up potentially present ACLs from the previous config + ingressIDs := oc.getUDNOpenPortDbIDs(podNamespacedName, libovsdbutil.ACLIngress) + ingressACL := libovsdbutil.BuildACL(ingressIDs, types.PrimaryUDNAllowPriority, + ingressMatch, nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportIngress) + + egressIDs := oc.getUDNOpenPortDbIDs(podNamespacedName, libovsdbutil.ACLEgress) + egressACL := libovsdbutil.BuildACL(egressIDs, types.PrimaryUDNAllowPriority, + egressMatch, nbdb.ACLActionAllow, nil, libovsdbutil.LportEgress) + + var err error + if ingressMatch == "" && egressMatch == "" || parseErr != nil { + // no open ports or error parsing annotations, remove ACLs + foundACLs, err := libovsdbops.FindACLs(oc.nbClient, []*nbdb.ACL{ingressACL, egressACL}) + if err != nil { + return ops, parseErr, fmt.Errorf("failed to find open ports UDN ACLs: %v", err) + } + ops, err = libovsdbops.DeleteACLsFromPortGroupOps(oc.nbClient, ops, udnPGName, foundACLs...)
+ if err != nil { + return ops, parseErr, fmt.Errorf("failed to remove open ports ACLs from portGroup %s: %v", udnPGName, err) + } + } else { + // update ACLs + ops, err = libovsdbops.CreateOrUpdateACLsOps(oc.nbClient, ops, oc.GetSamplingConfig(), ingressACL, egressACL) + if err != nil { + return ops, parseErr, fmt.Errorf("failed to create or update open ports UDN ACLs: %v", err) + } + + ops, err = libovsdbops.AddACLsToPortGroupOps(oc.nbClient, ops, udnPGName, ingressACL, egressACL) + if err != nil { + return ops, parseErr, fmt.Errorf("failed to add open ports ACLs to portGroup %s: %v", udnPGName, err) + } + } + return ops, parseErr, nil +} + +func (oc *DefaultNetworkController) getUDNOpenPortDbIDs(podNamespacedName string, aclDir libovsdbutil.ACLDirection) *libovsdbops.DbObjectIDs { + return libovsdbops.NewDbObjectIDs(libovsdbops.ACLUDN, oc.controllerName, + map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: OpenPortACLPrefix + podNamespacedName, + libovsdbops.PolicyDirectionKey: string(aclDir), + }) +} diff --git a/go-controller/pkg/ovn/zone_interconnect/chassis_handler_test.go b/go-controller/pkg/ovn/zone_interconnect/chassis_handler_test.go index 3e77529e82..8d2a9f5f87 100644 --- a/go-controller/pkg/ovn/zone_interconnect/chassis_handler_test.go +++ b/go-controller/pkg/ovn/zone_interconnect/chassis_handler_test.go @@ -3,7 +3,7 @@ package zoneinterconnect import ( "context" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/urfave/cli/v2" diff --git a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go index ea9ef17b85..5136378814 100644 --- a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go +++ b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go @@ -141,7 +141,7 @@ func NewZoneInterconnectHandler(nInfo util.NetInfo, nbClient, sbClient libovsdbc nbClient: nbClient, sbClient: sbClient, watchFactory: watchFactory, - networkId: util.InvalidNetworkID, + networkId: util.InvalidID, } zic.networkClusterRouterName = zic.GetNetworkScopedName(types.OVNClusterRouter) @@ -444,7 +444,11 @@ func (zic *ZoneInterconnectHandler) createLocalZoneNodeResources(node *corev1.No func (zic *ZoneInterconnectHandler) createRemoteZoneNodeResources(node *corev1.Node, nodeID int, chassisId string) error { nodeTransitSwitchPortIPs, err := util.ParseNodeTransitSwitchPortAddrs(node) if err != nil || len(nodeTransitSwitchPortIPs) == 0 { - return fmt.Errorf("failed to get the node transit switch port Ips : %w", err) + err = fmt.Errorf("failed to get the node transit switch port IP addresses : %w", err) + if util.IsAnnotationNotSetError(err) { + return types.NewSuppressedError(err) + } + return err } transitRouterPortMac := util.IPAddrToHWAddr(nodeTransitSwitchPortIPs[0].IP) @@ -590,7 +594,12 @@ func (zic *ZoneInterconnectHandler) addRemoteNodeStaticRoutes(node *corev1.Node, nodeSubnets, err := util.ParseNodeHostSubnetAnnotation(node, zic.GetNetworkName()) if err != nil { - return fmt.Errorf("failed to parse node %s subnets annotation %w", node.Name, err) + err = fmt.Errorf("failed to parse node %s subnets annotation %w", node.Name, err) + if util.IsAnnotationNotSetError(err) { + // remote node may not have the annotation yet, suppress it + return types.NewSuppressedError(err) + } + return err } nodeSubnetStaticRoutes := zic.getStaticRoutes(nodeSubnets, nodeTransitSwitchPortIPs, false) @@ -601,9 +610,11 @@ func (zic *ZoneInterconnectHandler) addRemoteNodeStaticRoutes(node 
*corev1.Node, } } - if zic.IsSecondary() { + if zic.IsSecondary() && !(util.IsNetworkSegmentationSupportEnabled() && zic.IsPrimaryNetwork()) { // Secondary network cluster router doesn't connect to a join switch // or to a Gateway router. + // + // Except for UDN primary L3 networks. return nil } @@ -615,7 +626,11 @@ func (zic *ZoneInterconnectHandler) addRemoteNodeStaticRoutes(node *corev1.Node, var err1 error nodeGRPIPs, err1 = util.ParseNodeGatewayRouterLRPAddrs(node) if err1 != nil { - return fmt.Errorf("failed to parse node %s Gateway router LRP Addrs annotation %w", node.Name, err1) + err1 = fmt.Errorf("failed to parse node %s Gateway router LRP Addrs annotation %w", node.Name, err1) + if util.IsAnnotationNotSetError(err1) { + return types.NewSuppressedError(err1) + } + return err1 } } } @@ -733,14 +748,14 @@ func (zic *ZoneInterconnectHandler) getStaticRoutes(ipPrefixes []*net.IPNet, nex func (zic *ZoneInterconnectHandler) getNetworkId() (int, error) { nodes, err := zic.watchFactory.GetNodes() if err != nil { - return util.InvalidNetworkID, err + return util.InvalidID, err } return zic.getNetworkIdFromNodes(nodes) } // getNetworkId returns the cached network ID or looks it up in any of the provided nodes func (zic *ZoneInterconnectHandler) getNetworkIdFromNodes(nodes []*corev1.Node) (int, error) { - if zic.networkId != util.InvalidNetworkID { + if zic.networkId != util.InvalidID { return zic.networkId, nil } @@ -754,11 +769,11 @@ func (zic *ZoneInterconnectHandler) getNetworkIdFromNodes(nodes []*corev1.Node) if err != nil { break } - if networkId != util.InvalidNetworkID { + if networkId != util.InvalidID { zic.networkId = networkId return zic.networkId, nil } } - return util.InvalidNetworkID, fmt.Errorf("could not find network ID: %w", err) + return util.InvalidID, fmt.Errorf("could not find network ID: %w", err) } diff --git a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go index 2225ff74d7..5abdb028fc 100644 --- a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go +++ b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go @@ -5,7 +5,7 @@ import ( "net" "sort" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" cnitypes "github.com/containernetworking/cni/pkg/types" diff --git a/go-controller/pkg/ovn/zone_interconnect/zone_interconnect_suite_test.go b/go-controller/pkg/ovn/zone_interconnect/zone_interconnect_suite_test.go index 6115c68c0f..540281fa08 100644 --- a/go-controller/pkg/ovn/zone_interconnect/zone_interconnect_suite_test.go +++ b/go-controller/pkg/ovn/zone_interconnect/zone_interconnect_suite_test.go @@ -3,7 +3,7 @@ package zoneinterconnect import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) diff --git a/go-controller/pkg/ovnwebhook/nodeadmission.go b/go-controller/pkg/ovnwebhook/nodeadmission.go index 9425708c56..4156d1a735 100644 --- a/go-controller/pkg/ovnwebhook/nodeadmission.go +++ b/go-controller/pkg/ovnwebhook/nodeadmission.go @@ -25,6 +25,7 @@ type checkNodeAnnot func(v annotationChange, nodeName string) error // commonNodeAnnotationChecks holds annotations allowed for ovnkube-node: users in non-IC and IC environments var commonNodeAnnotationChecks = map[string]checkNodeAnnot{ + util.OVNNodeBridgeEgressIPs: nil, util.OVNNodeHostCIDRs: nil, util.OVNNodeSecondaryHostEgressIPs: nil, util.OvnNodeL3GatewayConfig: nil, diff --git a/go-controller/pkg/persistentips/allocator.go b/go-controller/pkg/persistentips/allocator.go index c14e116dc6..d17e111601 100644 --- a/go-controller/pkg/persistentips/allocator.go +++ b/go-controller/pkg/persistentips/allocator.go @@ -3,16 +3,17 @@ package persistentips import ( "errors" "fmt" - ipam "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/ip" - "k8s.io/klog/v2" "net" + "k8s.io/klog/v2" + "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" ipamclaimsapi "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" ipamclaimslister "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1" + ipam "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/ip" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" ovnktypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -150,7 +151,6 @@ func (icr *IPAMClaimReconciler) FindIPAMClaim(claimName string, namespace string // the cluster. For live pods, therse are already allocated, so no error will // be thrown (e.g. we ignore the `ipam.IsErrAllocated` error func (icr *IPAMClaimReconciler) Sync(objs []interface{}, ipAllocator IPAllocator) error { - var ips []*net.IPNet for _, obj := range objs { ipamClaim, ok := obj.(*ipamclaimsapi.IPAMClaim) if !ok { @@ -169,11 +169,11 @@ func (icr *IPAMClaimReconciler) Sync(objs []interface{}, ipAllocator IPAllocator if err != nil { return fmt.Errorf("failed at parsing IP when allocating persistent IPs: %w", err) } - ips = append(ips, ipnets...) - } - if len(ips) > 0 { - if err := ipAllocator.AllocateIPs(ips); err != nil && !ipam.IsErrAllocated(err) { - return fmt.Errorf("failed allocating persistent ips: %w", err) + + if len(ipnets) != 0 { + if err := ipAllocator.AllocateIPs(ipnets); err != nil && !ipam.IsErrAllocated(err) { + return fmt.Errorf("failed syncing persistent ips: %w", err) + } } } return nil diff --git a/go-controller/pkg/persistentips/allocator_test.go b/go-controller/pkg/persistentips/allocator_test.go index b888401b29..0b797c9332 100644 --- a/go-controller/pkg/persistentips/allocator_test.go +++ b/go-controller/pkg/persistentips/allocator_test.go @@ -2,11 +2,11 @@ package persistentips import ( "context" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/ip" "testing" - . "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/extensions/table" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/ip" + + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/containernetworking/cni/pkg/types" @@ -79,40 +79,42 @@ var _ = Describe("Persistent IP allocator operations", func() { Expect(ipamClaimsReconciler.Reconcile(nil, nil, namedAllocator)).To(Succeed()) }) - table.DescribeTable("reconciling IPAMClaims is successful when provided with", func(oldIPAMClaim, newIPAMClaim *ipamclaimsapi.IPAMClaim) { + DescribeTable("reconciling IPAMClaims is successful when provided with", func(oldIPAMClaim, newIPAMClaim *ipamclaimsapi.IPAMClaim) { Expect(ipamClaimsReconciler.Reconcile(oldIPAMClaim, newIPAMClaim, namedAllocator)).To(Succeed()) updatedIPAMClaim, err := ovnkapiclient.IPAMClaimsClient.K8sV1alpha1().IPAMClaims(namespace).Get(context.Background(), claimName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(updatedIPAMClaim.Status.IPs).To(ConsistOf(newIPAMClaim.Status.IPs)) }, - table.Entry( + Entry( "no IP addresses to persist", nil, emptyDummyIPAMClaim(namespace, claimName, networkName), ), - table.Entry( + Entry( "no IP addresses to persist, but it is nothing new", emptyDummyIPAMClaim(namespace, claimName, networkName), emptyDummyIPAMClaim(namespace, claimName, networkName), ), - table.Entry( + Entry( "an IP addresses to persist", nil, ipamClaimWithIPs(namespace, claimName, networkName), ), - table.Entry( + Entry( "an IP addresses to persist, but already present", ipamClaimWithIPs(namespace, claimName, networkName), ipamClaimWithIPs(namespace, claimName, networkName), ), ) - table.DescribeTable("syncing the IP allocator from the IPAMClaims is successful when provided with", func(ipamClaims ...interface{}) { + DescribeTable("syncing the IP allocator from the IPAMClaims is successful when provided with", func(ipamClaims ...interface{}) { Expect(ipamClaimsReconciler.Sync(ipamClaims, namedAllocator)).To(Succeed()) }, - table.Entry("no objects to sync with"), - table.Entry("an IPAMClaim without persisted IPs", emptyDummyIPAMClaim(namespace, claimName, networkName)), - table.Entry("an IPAMClaim with persisted IPs", ipamClaimWithIPs(namespace, claimName, networkName, "192.168.200.2/24", "fd10::1/64")), + Entry("no objects to sync with"), + Entry("an IPAMClaim without persisted IPs", emptyDummyIPAMClaim(namespace, claimName, networkName)), + Entry("an IPAMClaim with persisted IPs", + ipamClaimWithIPs(namespace, claimName, networkName, "192.168.200.2/24", "fd10::1/64"), + ipamClaimWithIPs(namespace, claimName, networkName, "192.168.200.3/24", "fd10::2/64")), ) }) @@ -167,7 +169,7 @@ var _ = Describe("Persistent IP allocator operations", func() { initialIPs = []string{"192.168.200.2/24", "fd10::1/64"} ipAllocator := subnet.NewAllocator() Expect(ipAllocator.AddOrUpdateSubnet(subnetName, ovntest.MustParseIPNets("192.168.200.0/24", "fd10::/64"))).To(Succeed()) - Expect(ipAllocator.AllocateIPs(subnetName, ovntest.MustParseIPNets(initialIPs...))).To(Succeed()) + Expect(ipAllocator.AllocateIPPerSubnet(subnetName, ovntest.MustParseIPNets(initialIPs...))).To(Succeed()) namedAllocator = ipAllocator.ForSubnet(subnetName) netInfo, err := util.NewNetInfo(dummyNetconf(networkName)) @@ -232,7 +234,7 @@ var _ = Describe("Persistent IP allocator operations", func() { }) Context("retrieving IPAMClaims", func() { - table.DescribeTable( + DescribeTable( "succeeds", func( netConf *ovncnitypes.NetConf, @@ -256,14 +258,14 @@ var _ = Describe("Persistent IP allocator operations", func() { ), ).To(Equal(expectedClaim)) }, - table.Entry( + Entry( "when the claim we're looking for is actually passed in layer2 topology", 
&ovncnitypes.NetConf{Topology: ovnktypes.Layer2Topology, Subnets: "192.10.10.0/24"}, &nadapi.NetworkSelectionElement{IPAMClaimReference: claimName, Namespace: namespace}, ipamClaimWithIPs(namespace, claimName, networkName, "192.10.10.10/24"), ipamClaimWithIPs(namespace, claimName, networkName, "192.10.10.10/24"), ), - table.Entry( + Entry( "when the claim we're looking for is actually passed in localnet topology", &ovncnitypes.NetConf{Topology: ovnktypes.LocalnetTopology, Subnets: "192.10.10.0/24"}, &nadapi.NetworkSelectionElement{IPAMClaimReference: claimName, Namespace: namespace}, @@ -272,7 +274,7 @@ var _ = Describe("Persistent IP allocator operations", func() { ), ) - table.DescribeTable( + DescribeTable( "fails", func( netConf *ovncnitypes.NetConf, @@ -295,63 +297,63 @@ var _ = Describe("Persistent IP allocator operations", func() { ) Expect(actualError).To(MatchError(expectedError)) }, - table.Entry( + Entry( "when an empty claim is passed in layer2 topology", &ovncnitypes.NetConf{Topology: ovnktypes.Layer2Topology}, &nadapi.NetworkSelectionElement{IPAMClaimReference: "", Namespace: namespace}, nil, ErrPersistentIPsNotAvailableOnNetwork, ), - table.Entry( + Entry( "when an empty claim is passed in localnet topology", &ovncnitypes.NetConf{Topology: ovnktypes.LocalnetTopology}, &nadapi.NetworkSelectionElement{IPAMClaimReference: "", Namespace: namespace}, nil, ErrPersistentIPsNotAvailableOnNetwork, ), - table.Entry( + Entry( "when an empty claim is passed in layer3 topology", &ovncnitypes.NetConf{Topology: ovnktypes.Layer3Topology}, &nadapi.NetworkSelectionElement{IPAMClaimReference: "", Namespace: namespace}, nil, ErrPersistentIPsNotAvailableOnNetwork, ), - table.Entry( + Entry( "when an empty datastore is passed in layer2 topology", &ovncnitypes.NetConf{Topology: ovnktypes.Layer2Topology}, &nadapi.NetworkSelectionElement{IPAMClaimReference: claimName, Namespace: namespace}, nil, ErrPersistentIPsNotAvailableOnNetwork, ), - table.Entry( + Entry( "when an empty datastore is passed in localnet topology", &ovncnitypes.NetConf{Topology: ovnktypes.LocalnetTopology}, &nadapi.NetworkSelectionElement{IPAMClaimReference: claimName, Namespace: namespace}, nil, ErrPersistentIPsNotAvailableOnNetwork, ), - table.Entry( + Entry( "when an empty datastore is passed in layer3 topology", &ovncnitypes.NetConf{Topology: ovnktypes.Layer3Topology}, &nadapi.NetworkSelectionElement{IPAMClaimReference: claimName, Namespace: namespace}, nil, ErrPersistentIPsNotAvailableOnNetwork, ), - table.Entry( + Entry( "when the claim we're looking for is actually passed in layer2 topology for a network without subnets", &ovncnitypes.NetConf{Topology: ovnktypes.Layer2Topology}, &nadapi.NetworkSelectionElement{IPAMClaimReference: claimName, Namespace: namespace}, ipamClaimWithIPs(namespace, claimName, networkName, "192.10.10.10/24"), ErrPersistentIPsNotAvailableOnNetwork, ), - table.Entry( + Entry( "when the claim we're looking for is actually passed in localnet topology for a network without subnets", &ovncnitypes.NetConf{Topology: ovnktypes.LocalnetTopology}, &nadapi.NetworkSelectionElement{IPAMClaimReference: claimName, Namespace: namespace}, ipamClaimWithIPs(namespace, claimName, networkName, "192.10.10.10/24"), ErrPersistentIPsNotAvailableOnNetwork, ), - table.Entry( + Entry( "when the claim we're looking for is actually passed in layer3 topology", &ovncnitypes.NetConf{Topology: ovnktypes.Layer3Topology, Subnets: "192.10.10.0/16/24"}, &nadapi.NetworkSelectionElement{IPAMClaimReference: claimName, Namespace: namespace}, 
diff --git a/go-controller/pkg/sbdb/igmp_group.go b/go-controller/pkg/sbdb/igmp_group.go index a8576a4834..73a0bb9437 100644 --- a/go-controller/pkg/sbdb/igmp_group.go +++ b/go-controller/pkg/sbdb/igmp_group.go @@ -15,6 +15,7 @@ type IGMPGroup struct { ChassisName string `ovsdb:"chassis_name"` Datapath *string `ovsdb:"datapath"` Ports []string `ovsdb:"ports"` + Protocol string `ovsdb:"protocol"` } func (a *IGMPGroup) GetUUID() string { @@ -101,6 +102,10 @@ func equalIGMPGroupPorts(a, b []string) bool { return true } +func (a *IGMPGroup) GetProtocol() string { + return a.Protocol +} + func (a *IGMPGroup) DeepCopyInto(b *IGMPGroup) { *b = *a b.Chassis = copyIGMPGroupChassis(a.Chassis) @@ -129,7 +134,8 @@ func (a *IGMPGroup) Equals(b *IGMPGroup) bool { equalIGMPGroupChassis(a.Chassis, b.Chassis) && a.ChassisName == b.ChassisName && equalIGMPGroupDatapath(a.Datapath, b.Datapath) && - equalIGMPGroupPorts(a.Ports, b.Ports) + equalIGMPGroupPorts(a.Ports, b.Ports) && + a.Protocol == b.Protocol } func (a *IGMPGroup) EqualsModel(b model.Model) bool { diff --git a/go-controller/pkg/sbdb/logical_flow.go b/go-controller/pkg/sbdb/logical_flow.go index 26fbda1b51..42af1cdf54 100644 --- a/go-controller/pkg/sbdb/logical_flow.go +++ b/go-controller/pkg/sbdb/logical_flow.go @@ -22,6 +22,7 @@ type LogicalFlow struct { Actions string `ovsdb:"actions"` ControllerMeter *string `ovsdb:"controller_meter"` ExternalIDs map[string]string `ovsdb:"external_ids"` + FlowDesc *string `ovsdb:"flow_desc"` LogicalDatapath *string `ovsdb:"logical_datapath"` LogicalDpGroup *string `ovsdb:"logical_dp_group"` Match string `ovsdb:"match"` @@ -91,6 +92,28 @@ func equalLogicalFlowExternalIDs(a, b map[string]string) bool { return true } +func (a *LogicalFlow) GetFlowDesc() *string { + return a.FlowDesc +} + +func copyLogicalFlowFlowDesc(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalFlowFlowDesc(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + func (a *LogicalFlow) GetLogicalDatapath() *string { return a.LogicalDatapath } @@ -185,6 +208,7 @@ func (a *LogicalFlow) DeepCopyInto(b *LogicalFlow) { *b = *a b.ControllerMeter = copyLogicalFlowControllerMeter(a.ControllerMeter) b.ExternalIDs = copyLogicalFlowExternalIDs(a.ExternalIDs) + b.FlowDesc = copyLogicalFlowFlowDesc(a.FlowDesc) b.LogicalDatapath = copyLogicalFlowLogicalDatapath(a.LogicalDatapath) b.LogicalDpGroup = copyLogicalFlowLogicalDpGroup(a.LogicalDpGroup) b.Tags = copyLogicalFlowTags(a.Tags) @@ -210,6 +234,7 @@ func (a *LogicalFlow) Equals(b *LogicalFlow) bool { a.Actions == b.Actions && equalLogicalFlowControllerMeter(a.ControllerMeter, b.ControllerMeter) && equalLogicalFlowExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalLogicalFlowFlowDesc(a.FlowDesc, b.FlowDesc) && equalLogicalFlowLogicalDatapath(a.LogicalDatapath, b.LogicalDatapath) && equalLogicalFlowLogicalDpGroup(a.LogicalDpGroup, b.LogicalDpGroup) && a.Match == b.Match && diff --git a/go-controller/pkg/sbdb/model.go b/go-controller/pkg/sbdb/model.go index 109819f75c..bc838fe497 100644 --- a/go-controller/pkg/sbdb/model.go +++ b/go-controller/pkg/sbdb/model.go @@ -52,7 +52,7 @@ func FullDatabaseModel() (model.ClientDBModel, error) { var schema = `{ "name": "OVN_Southbound", - "version": "20.33.0", + "version": "20.37.0", "tables": { "Address_Set": { "columns": { @@ -833,6 +833,9 @@ var schema = `{ "min": 0, "max": "unlimited" } + }, + "protocol": { + "type": "string" } }, "indexes": [ @@ 
-1071,6 +1074,15 @@ var schema = `{ "max": "unlimited" } }, + "flow_desc": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, "logical_datapath": { "type": { "key": { diff --git a/go-controller/pkg/testing/kube.go b/go-controller/pkg/testing/kube.go index 0edeb43409..2dbf5d7fc4 100644 --- a/go-controller/pkg/testing/kube.go +++ b/go-controller/pkg/testing/kube.go @@ -1,6 +1,7 @@ package testing import ( + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" discovery "k8s.io/api/discovery/v1" "k8s.io/utils/ptr" ) @@ -49,3 +50,18 @@ func MakeTerminatingNonServingEndpoint(node string, addresses ...string) discove NodeName: &node, } } + +func MirrorEndpointSlice(defaultEndpointSlice *discovery.EndpointSlice, network string, keepEndpoints bool) *discovery.EndpointSlice { + mirror := defaultEndpointSlice.DeepCopy() + mirror.Name = defaultEndpointSlice.Name + "-mirrored" + mirror.Labels[discovery.LabelManagedBy] = types.EndpointSliceMirrorControllerName + mirror.Labels[types.LabelSourceEndpointSlice] = defaultEndpointSlice.Name + mirror.Labels[types.LabelUserDefinedEndpointSliceNetwork] = network + mirror.Labels[types.LabelUserDefinedServiceName] = defaultEndpointSlice.Labels[discovery.LabelServiceName] + + if !keepEndpoints { + mirror.Endpoints = nil + } + + return mirror +} diff --git a/go-controller/pkg/testing/libovsdb/libovsdb.go b/go-controller/pkg/testing/libovsdb/libovsdb.go index 889fc755a0..98a6dbe646 100644 --- a/go-controller/pkg/testing/libovsdb/libovsdb.go +++ b/go-controller/pkg/testing/libovsdb/libovsdb.go @@ -33,6 +33,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/vswitchd" ) type TestSetup struct { @@ -41,8 +42,9 @@ type TestSetup struct { // addition of invalid data (like duplicate indexes). 
IgnoreConstraints bool - NBData []TestData - SBData []TestData + NBData []TestData + SBData []TestData + OVSData []TestData } type TestData interface{} @@ -58,9 +60,9 @@ type Context struct { serverStopCh chan struct{} serverWg *sync.WaitGroup - SBServer *TestOvsdbServer - NBServer *TestOvsdbServer - VSServer *TestOvsdbServer + SBServer *TestOvsdbServer + NBServer *TestOvsdbServer + OVSServer *TestOvsdbServer } func newContext() *Context { @@ -126,6 +128,41 @@ func NewSBTestHarness(setup TestSetup, testCtx *Context) (libovsdbclient.Client, return client, testCtx, err } +// NewOVSTestHarness runs an OVSDB server and returns the corresponding client +func NewOVSTestHarness(setup TestSetup) (libovsdbclient.Client, *Context, error) { + testCtx := newContext() + randBytes := make([]byte, 16) + cryptorand.Read(randBytes) + tmpOVSSocketPath := filepath.Join(os.TempDir(), "ovs-"+hex.EncodeToString(randBytes)) + + cfg := config.OvnAuthConfig{ + Scheme: config.OvnDBSchemeUnix, + Address: "unix:" + tmpOVSSocketPath, + } + + server, err := newOVSServer(cfg, setup.OVSData, false) + if err != nil { + return nil, nil, err + } + + client, err := newOVSClient(cfg, testCtx) + if err != nil { + server.Close() + return nil, nil, err + } + + testCtx.serverWg.Add(1) + go func() { + defer testCtx.serverWg.Done() + <-testCtx.serverStopCh + server.Close() + }() + + testCtx.OVSServer = server + + return client, testCtx, err +} + func newOVSDBTestHarness(serverData []TestData, ignoreConstraints bool, newServer serverBuilderFn, newClient clientBuilderFn, testCtx *Context) (libovsdbclient.Client, *TestOvsdbServer, error) { cfg := config.OvnAuthConfig{ Scheme: config.OvnDBSchemeUnix, @@ -209,6 +249,26 @@ func newNBServer(cfg config.OvnAuthConfig, data []TestData, ignoreConstraints bo return newOVSDBServer(cfg, dbModel, schema, data, ignoreConstraints) } +func newOVSServer(cfg config.OvnAuthConfig, data []TestData, ignoreConstraints bool) (*TestOvsdbServer, error) { + dbModel, err := vswitchd.FullDatabaseModel() + if err != nil { + return nil, err + } + schema := vswitchd.Schema() + return newOVSDBServer(cfg, dbModel, schema, data, ignoreConstraints) +} + +func newOVSClient(cfg config.OvnAuthConfig, testCtx *Context) (libovsdbclient.Client, error) { + stopChan := make(chan struct{}) + ovsClient, err := libovsdb.NewOVSClientWithConfig(cfg, stopChan) + if err != nil { + return nil, err + } + + clientWaitOnCleanup(testCtx, ovsClient, stopChan) + return ovsClient, err +} + func testDataToOperations(dbMod model.DatabaseModel, data []TestData) ([]ovsdb.Operation, error) { m := mapper.NewMapper(dbMod.Schema) newData := copystructure.Must(copystructure.Copy(data)).([]TestData) diff --git a/go-controller/pkg/testing/nad/netattach.go b/go-controller/pkg/testing/nad/netattach.go new file mode 100644 index 0000000000..1e60fd8784 --- /dev/null +++ b/go-controller/pkg/testing/nad/netattach.go @@ -0,0 +1,79 @@ +package nad + +import ( + "context" + "errors" + + networkAttachDefController "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/network-attach-def-controller" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" +) + +type FakeNetworkController struct { + util.NetInfo +} + +func (nc *FakeNetworkController) Start(ctx context.Context) error { + return nil +} + +func (nc *FakeNetworkController) Stop() {} + +func (nc *FakeNetworkController) Cleanup() error { + return nil +} + +type FakeNetworkControllerManager struct{} + +func (ncm *FakeNetworkControllerManager)
NewNetworkController(netInfo util.NetInfo) (networkAttachDefController.NetworkController, error) { + return &FakeNetworkController{netInfo}, nil +} + +func (ncm *FakeNetworkControllerManager) CleanupDeletedNetworks(validNetworks ...util.BasicNetInfo) error { + return nil +} + +type FakeNADController struct { + // namespace -> netInfo + PrimaryNetworks map[string]util.NetInfo +} + +func (nc *FakeNADController) Start() error { return nil } +func (nc *FakeNADController) Stop() {} +func (nc *FakeNADController) GetActiveNetworkForNamespace(namespace string) (util.NetInfo, error) { + if primaryNetworks, ok := nc.PrimaryNetworks[namespace]; ok && primaryNetworks != nil { + return primaryNetworks, nil + } + return &util.DefaultNetInfo{}, nil +} +func (nc *FakeNADController) GetNetwork(networkName string) (util.NetInfo, error) { + for _, ni := range nc.PrimaryNetworks { + if ni.GetNetworkName() == networkName { + return ni, nil + } + } + return &util.DefaultNetInfo{}, nil +} +func (nc *FakeNADController) GetActiveNetworkNamespaces(networkName string) ([]string, error) { + namespaces := make([]string, 0) + for namespaceName, primaryNAD := range nc.PrimaryNetworks { + nadNetworkName := primaryNAD.GetNADs()[0] + if nadNetworkName != networkName { + continue + } + namespaces = append(namespaces, namespaceName) + } + return namespaces, nil +} + +func (nc *FakeNADController) DoWithLock(f func(network util.NetInfo) error) error { + var errs []error + for _, ni := range nc.PrimaryNetworks { + if err := f(ni); err != nil { + errs = append(errs, err) + } + } + if len(errs) > 0 { + return errors.Join(errs...) + } + return nil +} diff --git a/go-controller/pkg/testing/noroot.go b/go-controller/pkg/testing/noroot.go new file mode 100644 index 0000000000..06b88abaf5 --- /dev/null +++ b/go-controller/pkg/testing/noroot.go @@ -0,0 +1,7 @@ +package testing + +import "os" + +func NoRoot() bool { + return os.Getenv("NOROOT") == "TRUE" +} diff --git a/go-controller/pkg/testing/testing.go b/go-controller/pkg/testing/testing.go index 7045bab170..497ae17a8f 100644 --- a/go-controller/pkg/testing/testing.go +++ b/go-controller/pkg/testing/testing.go @@ -1,9 +1,7 @@ package testing import ( - "os" - - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega/format" kruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -14,7 +12,7 @@ import ( // tests that are unable to execute in certain environments. 
Such as those without // root or cap_net_admin privileges func OnSupportedPlatformsIt(description string, f interface{}) { - if os.Getenv("NOROOT") != "TRUE" { + if !NoRoot() { ginkgo.It(description, f) } else { defer ginkgo.GinkgoRecover() diff --git a/go-controller/pkg/types/const.go b/go-controller/pkg/types/const.go index 5523663398..1be9adcb45 100644 --- a/go-controller/pkg/types/const.go +++ b/go-controller/pkg/types/const.go @@ -108,18 +108,27 @@ const ( MGMTPortPolicyPriority = "1005" NodeSubnetPolicyPriority = "1004" InterNodePolicyPriority = "1003" + UDNHostCIDRPolicyPriority = "99" HybridOverlaySubnetPriority = 1002 HybridOverlayReroutePriority = 501 DefaultNoRereoutePriority = 102 EgressSVCReroutePriority = 101 EgressIPReroutePriority = 100 EgressIPRerouteQoSRulePriority = 103 - EgressLiveMigrationReroutePiority = 10 + // priority of logical router policies on a node's gateway router + EgressIPSNATMarkPriority = 95 + EgressLiveMigrationReroutePriority = 10 // EndpointSliceMirrorControllerName mirror EndpointSlice controller name (used as a value for the "endpointslice.kubernetes.io/managed-by" label) EndpointSliceMirrorControllerName = "endpointslice-mirror-controller.k8s.ovn.org" // EndpointSliceDefaultControllerName default kubernetes EndpointSlice controller name (used as a value for the "endpointslice.kubernetes.io/managed-by" label) EndpointSliceDefaultControllerName = "endpointslice-controller.k8s.io" + // LabelSourceEndpointSlice label key used in mirrored EndpointSlice + // that has the value of the default EndpointSlice name + LabelSourceEndpointSlice = "k8s.ovn.org/source-endpointslice" + // LabelSourceEndpointSliceVersion label key used in mirrored EndpointSlice + // that has the value of the last known default EndpointSlice ResourceVersion + LabelSourceEndpointSliceVersion = "k8s.ovn.org/source-endpointslice-version" // LabelUserDefinedEndpointSliceNetwork label key used in mirrored EndpointSlices that contains the current primary user defined network name LabelUserDefinedEndpointSliceNetwork = "k8s.ovn.org/endpointslice-network" // LabelUserDefinedServiceName label key used in mirrored EndpointSlices that contains the service name matching the EndpointSlice @@ -146,6 +155,11 @@ const ( // OVN-K8S annotation & taint constants OvnK8sPrefix = "k8s.ovn.org" + + // DefaultNetworkLabelSelector is the label that needs to be matched on a + // selector to select the default network + DefaultNetworkLabelSelector = OvnK8sPrefix + "/default-network" + // Deprecated: we used to set topology version as an annotation on the node. We don't do this anymore.
OvnK8sTopoAnno = OvnK8sPrefix + "/" + "topology-version" OvnK8sSmallMTUTaintKey = OvnK8sPrefix + "/" + "mtu-too-small" @@ -179,7 +193,11 @@ const ( // key for network name external-id NetworkExternalID = OvnK8sPrefix + "/" + "network" + // key for node name external-id + NodeExternalID = OvnK8sPrefix + "/" + "node" + // key for network role external-id: possible values are "default", "primary", "secondary" + NetworkRoleExternalID = OvnK8sPrefix + "/" + "role" // key for NAD name external-id, only used for secondary logical switch port of a pod NADExternalID = OvnK8sPrefix + "/" + "nad" // key for topology type external-id, only used for secondary network logical entities TopologyExternalID = OvnK8sPrefix + "/" + "topology" @@ -187,6 +206,8 @@ const ( LoadBalancerKindExternalID = OvnK8sPrefix + "/" + "kind" // key for load_balancer service external-id LoadBalancerOwnerExternalID = OvnK8sPrefix + "/" + "owner" + // key for UDN enabled services routes + UDNEnabledServiceExternalID = OvnK8sPrefix + "/" + "udn-enabled-default-service" // different secondary network topology type defined in CNI netconf Layer3Topology = "layer3" @@ -197,6 +218,7 @@ const ( // defined in CNI netconf as a user defined network NetworkRolePrimary = "primary" NetworkRoleSecondary = "secondary" + NetworkRoleDefault = "default" // defined internally by ovnkube to recognize "default" // network's role as a "infrastructure-locked" network // when user defined network is the primary network for diff --git a/go-controller/pkg/util/batching/batch_test.go b/go-controller/pkg/util/batching/batch_test.go index 9e27c44ed1..58fbea3fb0 100644 --- a/go-controller/pkg/util/batching/batch_test.go +++ b/go-controller/pkg/util/batching/batch_test.go @@ -2,7 +2,7 @@ package batching import ( "fmt" - "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "strings" @@ -58,7 +58,6 @@ func TestBatch(t *testing.T) { for _, tCase := range tt { g := gomega.NewGomegaWithT(t) - ginkgo.By(tCase.name) var result []int batchNum := 0 err := Batch[int](tCase.batchSize, tCase.data, func(l []int) error { @@ -199,7 +198,6 @@ func TestBatchMap(t *testing.T) { for _, tCase := range tt { g := gomega.NewGomegaWithT(t) - ginkgo.By(tCase.name) result := map[string][]int{} batchNum := 0 err := BatchMap[int](tCase.batchSize, tCase.data, func(l map[string][]int) error { diff --git a/go-controller/pkg/util/dpu_annotations_test.go b/go-controller/pkg/util/dpu_annotations_test.go index e79f12508f..e879d926eb 100644 --- a/go-controller/pkg/util/dpu_annotations_test.go +++ b/go-controller/pkg/util/dpu_annotations_test.go @@ -1,7 +1,7 @@ package util import ( - . "github.com/onsi/ginkgo" + .
"github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/util/egressip_annotation.go b/go-controller/pkg/util/egressip_annotation.go new file mode 100644 index 0000000000..9f2ed76a23 --- /dev/null +++ b/go-controller/pkg/util/egressip_annotation.go @@ -0,0 +1,68 @@ +package util + +import ( + "fmt" + "strconv" +) + +const ( + EgressIPMarkAnnotation = "k8s.ovn.org/egressip-mark" + EgressIPMarkBase = 50000 + EgressIPMarkMax = 55000 +) + +type EgressIPMark struct { + strValue string + intValue int +} + +func (em EgressIPMark) String() string { + return em.strValue +} + +func (em EgressIPMark) ToInt() int { + return em.intValue +} + +func (em EgressIPMark) IsValid() bool { + return IsEgressIPMarkValid(em.intValue) +} + +func (em EgressIPMark) IsAvailable() bool { + return em.strValue != "" +} + +func ParseEgressIPMark(annotations map[string]string) (EgressIPMark, error) { + eipMark := EgressIPMark{} + if annotations == nil { + return eipMark, fmt.Errorf("failed to parse EgressIP mark from annotation because annotation is nil") + } + markStr, ok := annotations[EgressIPMarkAnnotation] + if !ok { + return eipMark, nil + } + eipMark.strValue = markStr + mark, err := strconv.Atoi(markStr) + if err != nil { + return eipMark, fmt.Errorf("failed to parse EgressIP mark annotation string %q to an integer", markStr) + } + eipMark.intValue = mark + return eipMark, nil +} + +func IsEgressIPMarkSet(annotations map[string]string) bool { + if annotations == nil { + return false + } + _, ok := annotations[EgressIPMarkAnnotation] + return ok +} + +func IsEgressIPMarkValid(mark int) bool { + return mark >= EgressIPMarkBase && mark <= EgressIPMarkMax +} + +// EgressIPMarkAnnotationChanged returns true if the EgressIP mark annotation changed +func EgressIPMarkAnnotationChanged(annotationA, annotationB map[string]string) bool { + return annotationA[EgressIPMarkAnnotation] != annotationB[EgressIPMarkAnnotation] +} diff --git a/go-controller/pkg/util/fake_client.go b/go-controller/pkg/util/fake_client.go index 3da55dc8b5..e11ec5bb39 100644 --- a/go-controller/pkg/util/fake_client.go +++ b/go-controller/pkg/util/fake_client.go @@ -19,11 +19,15 @@ import ( egressqosfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake" egressservice "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1" egressservicefake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/fake" + routeadvertisements "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1" + routeadvertisementsfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/fake" + udnv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" udnfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/fake" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/fake" anpapi "sigs.k8s.io/network-policy-api/apis/v1alpha1" @@ -42,6 +46,8 @@ func GetOVNClientset(objects ...runtime.Object) *OVNClientset { nads := []runtime.Object{} cloudObjects := []runtime.Object{} dnsNameResolverObjects := []runtime.Object{} + udnObjects := []runtime.Object{} + raObjects := []runtime.Object{} for _, object := range objects { switch object.(type) { case 
*egressip.EgressIP: @@ -64,23 +70,52 @@ func GetOVNClientset(objects ...runtime.Object) *OVNClientset { anpObjects = append(anpObjects, object) case *ocpnetworkapiv1alpha1.DNSNameResolver: dnsNameResolverObjects = append(dnsNameResolverObjects, object) + case *udnv1.UserDefinedNetwork, *udnv1.ClusterUserDefinedNetwork: + udnObjects = append(udnObjects, object) + case *routeadvertisements.RouteAdvertisements: + raObjects = append(raObjects, object) default: v1Objects = append(v1Objects, object) } } + + nadClient := nadfake.NewSimpleClientset(nads...) + // the NAD fake-client tracker must be populated manually because the NAD CRD uses an arbitrary API registration name + // that cannot be resolved by the underlying API machinery [1] [2]. + // [1] https://github.com/ovn-org/ovn-kubernetes/blob/65c79af35b2c22f90c863debefa15c4fb1f088cb/go-controller/vendor/k8s.io/client-go/testing/fixture.go#L341 + // [2] https://github.com/ovn-org/ovn-kubernetes/commit/434b0590ce8c61ade75edc996b2f7f83d530f840#diff-ae287d8b2b115068905d4b5bf477d0e8cb6586d271fe872ca3b17acc94f21075R140 + populateTracker(nadClient, nads...) + return &OVNClientset{ - KubeClient: fake.NewSimpleClientset(v1Objects...), - ANPClient: anpfake.NewSimpleClientset(anpObjects...), - EgressIPClient: egressipfake.NewSimpleClientset(egressIPObjects...), - EgressFirewallClient: egressfirewallfake.NewSimpleClientset(egressFirewallObjects...), - CloudNetworkClient: cloudservicefake.NewSimpleClientset(cloudObjects...), - EgressQoSClient: egressqosfake.NewSimpleClientset(egressQoSObjects...), - NetworkAttchDefClient: nadfake.NewSimpleClientset(nads...), - MultiNetworkPolicyClient: mnpfake.NewSimpleClientset(multiNetworkPolicyObjects...), - EgressServiceClient: egressservicefake.NewSimpleClientset(egressServiceObjects...), - AdminPolicyRouteClient: adminpolicybasedroutefake.NewSimpleClientset(apbExternalRouteObjects...), - OCPNetworkClient: ocpnetworkclientfake.NewSimpleClientset(dnsNameResolverObjects...), - UserDefinedNetworkClient: udnfake.NewSimpleClientset(), + KubeClient: fake.NewSimpleClientset(v1Objects...), + ANPClient: anpfake.NewSimpleClientset(anpObjects...), + EgressIPClient: egressipfake.NewSimpleClientset(egressIPObjects...), + EgressFirewallClient: egressfirewallfake.NewSimpleClientset(egressFirewallObjects...), + CloudNetworkClient: cloudservicefake.NewSimpleClientset(cloudObjects...), + EgressQoSClient: egressqosfake.NewSimpleClientset(egressQoSObjects...), + NetworkAttchDefClient: nadClient, + MultiNetworkPolicyClient: mnpfake.NewSimpleClientset(multiNetworkPolicyObjects...), + EgressServiceClient: egressservicefake.NewSimpleClientset(egressServiceObjects...), + AdminPolicyRouteClient: adminpolicybasedroutefake.NewSimpleClientset(apbExternalRouteObjects...), + OCPNetworkClient: ocpnetworkclientfake.NewSimpleClientset(dnsNameResolverObjects...), + UserDefinedNetworkClient: udnfake.NewSimpleClientset(udnObjects...), + RouteAdvertisementsClient: routeadvertisementsfake.NewSimpleClientset(raObjects...), } } + +// populateTracker populates the NAD fake-client internal tracker with NAD objects +func populateTracker(nadClient *nadfake.Clientset, objects ...runtime.Object) { + nadGVR := schema.GroupVersionResource(metav1.GroupVersionResource{ + Group: "k8s.cni.cncf.io", + Version: "v1", + Resource: "network-attachment-definitions", + }) + for _, obj := range objects { + if nad, ok := obj.(*nettypes.NetworkAttachmentDefinition); ok { + if err := nadClient.Tracker().Create(nadGVR, nad, nad.Namespace); err != nil { + panic(err) + } + } } } diff
--git a/go-controller/pkg/util/iptables.go b/go-controller/pkg/util/iptables.go index a8d753610e..6d76931c2e 100644 --- a/go-controller/pkg/util/iptables.go +++ b/go-controller/pkg/util/iptables.go @@ -156,10 +156,11 @@ func (f *FakeIPTables) List(tableName, chainName string) ([]string, error) { if err != nil { return nil, err } + ret := make([]string, len(chain)) for i := range chain { - chain[i] = fmt.Sprintf("-A %s %s", chainName, chain[i]) + ret[i] = fmt.Sprintf("-A %s %s", chainName, chain[i]) } - return chain, nil + return ret, nil } // ListChains returns the names of all chains in the table diff --git a/go-controller/pkg/util/kube.go b/go-controller/pkg/util/kube.go index 3523e6e4e4..60dc77b412 100644 --- a/go-controller/pkg/util/kube.go +++ b/go-controller/pkg/util/kube.go @@ -18,9 +18,7 @@ import ( certificatesv1 "k8s.io/api/certificates/v1" kapi "k8s.io/api/core/v1" discovery "k8s.io/api/discovery/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - k8sruntime "k8s.io/apimachinery/pkg/runtime" k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" @@ -48,82 +46,88 @@ import ( egressipclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned" egressqosclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned" egressserviceclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned" + routeadvertisementsclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned" userdefinednetworkclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" anpclientset "sigs.k8s.io/network-policy-api/pkg/client/clientset/versioned" ) // OVNClientset is a wrapper around all clientsets used by OVN-Kubernetes type OVNClientset struct { - KubeClient kubernetes.Interface - ANPClient anpclientset.Interface - EgressIPClient egressipclientset.Interface - EgressFirewallClient egressfirewallclientset.Interface - OCPNetworkClient ocpnetworkclientset.Interface - CloudNetworkClient ocpcloudnetworkclientset.Interface - EgressQoSClient egressqosclientset.Interface - NetworkAttchDefClient networkattchmentdefclientset.Interface - MultiNetworkPolicyClient multinetworkpolicyclientset.Interface - EgressServiceClient egressserviceclientset.Interface - AdminPolicyRouteClient adminpolicybasedrouteclientset.Interface - IPAMClaimsClient ipamclaimssclientset.Interface - UserDefinedNetworkClient userdefinednetworkclientset.Interface + KubeClient kubernetes.Interface + ANPClient anpclientset.Interface + EgressIPClient egressipclientset.Interface + EgressFirewallClient egressfirewallclientset.Interface + OCPNetworkClient ocpnetworkclientset.Interface + CloudNetworkClient ocpcloudnetworkclientset.Interface + EgressQoSClient egressqosclientset.Interface + NetworkAttchDefClient networkattchmentdefclientset.Interface + MultiNetworkPolicyClient multinetworkpolicyclientset.Interface + EgressServiceClient egressserviceclientset.Interface + AdminPolicyRouteClient adminpolicybasedrouteclientset.Interface + IPAMClaimsClient ipamclaimssclientset.Interface + UserDefinedNetworkClient userdefinednetworkclientset.Interface + RouteAdvertisementsClient routeadvertisementsclientset.Interface } // OVNMasterClientset type OVNMasterClientset struct 
{ - KubeClient kubernetes.Interface - ANPClient anpclientset.Interface - EgressIPClient egressipclientset.Interface - CloudNetworkClient ocpcloudnetworkclientset.Interface - EgressFirewallClient egressfirewallclientset.Interface - OCPNetworkClient ocpnetworkclientset.Interface - EgressQoSClient egressqosclientset.Interface - MultiNetworkPolicyClient multinetworkpolicyclientset.Interface - EgressServiceClient egressserviceclientset.Interface - AdminPolicyRouteClient adminpolicybasedrouteclientset.Interface - IPAMClaimsClient ipamclaimssclientset.Interface - NetworkAttchDefClient networkattchmentdefclientset.Interface - UserDefinedNetworkClient userdefinednetworkclientset.Interface + KubeClient kubernetes.Interface + ANPClient anpclientset.Interface + EgressIPClient egressipclientset.Interface + CloudNetworkClient ocpcloudnetworkclientset.Interface + EgressFirewallClient egressfirewallclientset.Interface + OCPNetworkClient ocpnetworkclientset.Interface + EgressQoSClient egressqosclientset.Interface + MultiNetworkPolicyClient multinetworkpolicyclientset.Interface + EgressServiceClient egressserviceclientset.Interface + AdminPolicyRouteClient adminpolicybasedrouteclientset.Interface + IPAMClaimsClient ipamclaimssclientset.Interface + NetworkAttchDefClient networkattchmentdefclientset.Interface + UserDefinedNetworkClient userdefinednetworkclientset.Interface + RouteAdvertisementsClient routeadvertisementsclientset.Interface } // OVNNetworkControllerManagerClientset type OVNKubeControllerClientset struct { - KubeClient kubernetes.Interface - ANPClient anpclientset.Interface - EgressIPClient egressipclientset.Interface - EgressFirewallClient egressfirewallclientset.Interface - OCPNetworkClient ocpnetworkclientset.Interface - EgressQoSClient egressqosclientset.Interface - MultiNetworkPolicyClient multinetworkpolicyclientset.Interface - EgressServiceClient egressserviceclientset.Interface - AdminPolicyRouteClient adminpolicybasedrouteclientset.Interface - IPAMClaimsClient ipamclaimssclientset.Interface - NetworkAttchDefClient networkattchmentdefclientset.Interface - UserDefinedNetworkClient userdefinednetworkclientset.Interface + KubeClient kubernetes.Interface + ANPClient anpclientset.Interface + EgressIPClient egressipclientset.Interface + EgressFirewallClient egressfirewallclientset.Interface + OCPNetworkClient ocpnetworkclientset.Interface + EgressQoSClient egressqosclientset.Interface + MultiNetworkPolicyClient multinetworkpolicyclientset.Interface + EgressServiceClient egressserviceclientset.Interface + AdminPolicyRouteClient adminpolicybasedrouteclientset.Interface + IPAMClaimsClient ipamclaimssclientset.Interface + NetworkAttchDefClient networkattchmentdefclientset.Interface + UserDefinedNetworkClient userdefinednetworkclientset.Interface + RouteAdvertisementsClient routeadvertisementsclientset.Interface } type OVNNodeClientset struct { - KubeClient kubernetes.Interface - EgressServiceClient egressserviceclientset.Interface - EgressIPClient egressipclientset.Interface - AdminPolicyRouteClient adminpolicybasedrouteclientset.Interface - NetworkAttchDefClient networkattchmentdefclientset.Interface + KubeClient kubernetes.Interface + EgressServiceClient egressserviceclientset.Interface + EgressIPClient egressipclientset.Interface + AdminPolicyRouteClient adminpolicybasedrouteclientset.Interface + NetworkAttchDefClient networkattchmentdefclientset.Interface + UserDefinedNetworkClient userdefinednetworkclientset.Interface + RouteAdvertisementsClient routeadvertisementsclientset.Interface } type 
OVNClusterManagerClientset struct { - KubeClient kubernetes.Interface - ANPClient anpclientset.Interface - EgressIPClient egressipclientset.Interface - CloudNetworkClient ocpcloudnetworkclientset.Interface - NetworkAttchDefClient networkattchmentdefclientset.Interface - EgressServiceClient egressserviceclientset.Interface - AdminPolicyRouteClient adminpolicybasedrouteclientset.Interface - EgressFirewallClient egressfirewallclientset.Interface - EgressQoSClient egressqosclientset.Interface - IPAMClaimsClient ipamclaimssclientset.Interface - OCPNetworkClient ocpnetworkclientset.Interface - UserDefinedNetworkClient userdefinednetworkclientset.Interface + KubeClient kubernetes.Interface + ANPClient anpclientset.Interface + EgressIPClient egressipclientset.Interface + CloudNetworkClient ocpcloudnetworkclientset.Interface + NetworkAttchDefClient networkattchmentdefclientset.Interface + EgressServiceClient egressserviceclientset.Interface + AdminPolicyRouteClient adminpolicybasedrouteclientset.Interface + EgressFirewallClient egressfirewallclientset.Interface + EgressQoSClient egressqosclientset.Interface + IPAMClaimsClient ipamclaimssclientset.Interface + OCPNetworkClient ocpnetworkclientset.Interface + UserDefinedNetworkClient userdefinednetworkclientset.Interface + RouteAdvertisementsClient routeadvertisementsclientset.Interface } const ( @@ -138,88 +142,96 @@ var ( func (cs *OVNClientset) GetMasterClientset() *OVNMasterClientset { return &OVNMasterClientset{ - KubeClient: cs.KubeClient, - ANPClient: cs.ANPClient, - EgressIPClient: cs.EgressIPClient, - CloudNetworkClient: cs.CloudNetworkClient, - EgressFirewallClient: cs.EgressFirewallClient, - OCPNetworkClient: cs.OCPNetworkClient, - EgressQoSClient: cs.EgressQoSClient, - MultiNetworkPolicyClient: cs.MultiNetworkPolicyClient, - EgressServiceClient: cs.EgressServiceClient, - AdminPolicyRouteClient: cs.AdminPolicyRouteClient, - IPAMClaimsClient: cs.IPAMClaimsClient, - NetworkAttchDefClient: cs.NetworkAttchDefClient, - UserDefinedNetworkClient: cs.UserDefinedNetworkClient, + KubeClient: cs.KubeClient, + ANPClient: cs.ANPClient, + EgressIPClient: cs.EgressIPClient, + CloudNetworkClient: cs.CloudNetworkClient, + EgressFirewallClient: cs.EgressFirewallClient, + OCPNetworkClient: cs.OCPNetworkClient, + EgressQoSClient: cs.EgressQoSClient, + MultiNetworkPolicyClient: cs.MultiNetworkPolicyClient, + EgressServiceClient: cs.EgressServiceClient, + AdminPolicyRouteClient: cs.AdminPolicyRouteClient, + IPAMClaimsClient: cs.IPAMClaimsClient, + NetworkAttchDefClient: cs.NetworkAttchDefClient, + UserDefinedNetworkClient: cs.UserDefinedNetworkClient, + RouteAdvertisementsClient: cs.RouteAdvertisementsClient, } } func (cs *OVNMasterClientset) GetOVNKubeControllerClientset() *OVNKubeControllerClientset { return &OVNKubeControllerClientset{ - KubeClient: cs.KubeClient, - ANPClient: cs.ANPClient, - EgressIPClient: cs.EgressIPClient, - EgressFirewallClient: cs.EgressFirewallClient, - OCPNetworkClient: cs.OCPNetworkClient, - EgressQoSClient: cs.EgressQoSClient, - MultiNetworkPolicyClient: cs.MultiNetworkPolicyClient, - EgressServiceClient: cs.EgressServiceClient, - AdminPolicyRouteClient: cs.AdminPolicyRouteClient, - IPAMClaimsClient: cs.IPAMClaimsClient, - NetworkAttchDefClient: cs.NetworkAttchDefClient, + KubeClient: cs.KubeClient, + ANPClient: cs.ANPClient, + EgressIPClient: cs.EgressIPClient, + EgressFirewallClient: cs.EgressFirewallClient, + OCPNetworkClient: cs.OCPNetworkClient, + EgressQoSClient: cs.EgressQoSClient, + MultiNetworkPolicyClient: 
cs.MultiNetworkPolicyClient, + EgressServiceClient: cs.EgressServiceClient, + AdminPolicyRouteClient: cs.AdminPolicyRouteClient, + IPAMClaimsClient: cs.IPAMClaimsClient, + NetworkAttchDefClient: cs.NetworkAttchDefClient, + UserDefinedNetworkClient: cs.UserDefinedNetworkClient, + RouteAdvertisementsClient: cs.RouteAdvertisementsClient, } } func (cs *OVNClientset) GetOVNKubeControllerClientset() *OVNKubeControllerClientset { return &OVNKubeControllerClientset{ - KubeClient: cs.KubeClient, - ANPClient: cs.ANPClient, - EgressIPClient: cs.EgressIPClient, - EgressFirewallClient: cs.EgressFirewallClient, - OCPNetworkClient: cs.OCPNetworkClient, - EgressQoSClient: cs.EgressQoSClient, - MultiNetworkPolicyClient: cs.MultiNetworkPolicyClient, - EgressServiceClient: cs.EgressServiceClient, - AdminPolicyRouteClient: cs.AdminPolicyRouteClient, - IPAMClaimsClient: cs.IPAMClaimsClient, - NetworkAttchDefClient: cs.NetworkAttchDefClient, - UserDefinedNetworkClient: cs.UserDefinedNetworkClient, + KubeClient: cs.KubeClient, + ANPClient: cs.ANPClient, + EgressIPClient: cs.EgressIPClient, + EgressFirewallClient: cs.EgressFirewallClient, + OCPNetworkClient: cs.OCPNetworkClient, + EgressQoSClient: cs.EgressQoSClient, + MultiNetworkPolicyClient: cs.MultiNetworkPolicyClient, + EgressServiceClient: cs.EgressServiceClient, + AdminPolicyRouteClient: cs.AdminPolicyRouteClient, + IPAMClaimsClient: cs.IPAMClaimsClient, + NetworkAttchDefClient: cs.NetworkAttchDefClient, + UserDefinedNetworkClient: cs.UserDefinedNetworkClient, + RouteAdvertisementsClient: cs.RouteAdvertisementsClient, } } func (cs *OVNClientset) GetClusterManagerClientset() *OVNClusterManagerClientset { return &OVNClusterManagerClientset{ - KubeClient: cs.KubeClient, - ANPClient: cs.ANPClient, - EgressIPClient: cs.EgressIPClient, - CloudNetworkClient: cs.CloudNetworkClient, - NetworkAttchDefClient: cs.NetworkAttchDefClient, - EgressServiceClient: cs.EgressServiceClient, - AdminPolicyRouteClient: cs.AdminPolicyRouteClient, - EgressFirewallClient: cs.EgressFirewallClient, - EgressQoSClient: cs.EgressQoSClient, - IPAMClaimsClient: cs.IPAMClaimsClient, - OCPNetworkClient: cs.OCPNetworkClient, - UserDefinedNetworkClient: cs.UserDefinedNetworkClient, + KubeClient: cs.KubeClient, + ANPClient: cs.ANPClient, + EgressIPClient: cs.EgressIPClient, + CloudNetworkClient: cs.CloudNetworkClient, + NetworkAttchDefClient: cs.NetworkAttchDefClient, + EgressServiceClient: cs.EgressServiceClient, + AdminPolicyRouteClient: cs.AdminPolicyRouteClient, + EgressFirewallClient: cs.EgressFirewallClient, + EgressQoSClient: cs.EgressQoSClient, + IPAMClaimsClient: cs.IPAMClaimsClient, + OCPNetworkClient: cs.OCPNetworkClient, + UserDefinedNetworkClient: cs.UserDefinedNetworkClient, + RouteAdvertisementsClient: cs.RouteAdvertisementsClient, } } func (cs *OVNClientset) GetNodeClientset() *OVNNodeClientset { return &OVNNodeClientset{ - KubeClient: cs.KubeClient, - EgressServiceClient: cs.EgressServiceClient, - EgressIPClient: cs.EgressIPClient, - AdminPolicyRouteClient: cs.AdminPolicyRouteClient, - NetworkAttchDefClient: cs.NetworkAttchDefClient, + KubeClient: cs.KubeClient, + EgressServiceClient: cs.EgressServiceClient, + EgressIPClient: cs.EgressIPClient, + AdminPolicyRouteClient: cs.AdminPolicyRouteClient, + NetworkAttchDefClient: cs.NetworkAttchDefClient, + UserDefinedNetworkClient: cs.UserDefinedNetworkClient, + RouteAdvertisementsClient: cs.RouteAdvertisementsClient, } } func (cs *OVNMasterClientset) GetNodeClientset() *OVNNodeClientset { return &OVNNodeClientset{ - KubeClient: 
cs.KubeClient, - EgressServiceClient: cs.EgressServiceClient, - EgressIPClient: cs.EgressIPClient, - NetworkAttchDefClient: cs.NetworkAttchDefClient, + KubeClient: cs.KubeClient, + EgressServiceClient: cs.EgressServiceClient, + EgressIPClient: cs.EgressIPClient, + NetworkAttchDefClient: cs.NetworkAttchDefClient, + RouteAdvertisementsClient: cs.RouteAdvertisementsClient, } } @@ -492,20 +504,26 @@ func NewOVNClientset(conf *config.KubernetesConfig) (*OVNClientset, error) { return nil, err } + routeAdvertisementsClientset, err := routeadvertisementsclientset.NewForConfig(kconfig) + if err != nil { + return nil, err + } + return &OVNClientset{ - KubeClient: kclientset, - ANPClient: anpClientset, - EgressIPClient: egressIPClientset, - EgressFirewallClient: egressFirewallClientset, - OCPNetworkClient: networkClientset, - CloudNetworkClient: cloudNetworkClientset, - EgressQoSClient: egressqosClientset, - NetworkAttchDefClient: networkAttchmntDefClientset, - MultiNetworkPolicyClient: multiNetworkPolicyClientset, - EgressServiceClient: egressserviceClientset, - AdminPolicyRouteClient: adminPolicyBasedRouteClientset, - IPAMClaimsClient: ipamClaimsClientset, - UserDefinedNetworkClient: userDefinedNetworkClientSet, + KubeClient: kclientset, + ANPClient: anpClientset, + EgressIPClient: egressIPClientset, + EgressFirewallClient: egressFirewallClientset, + OCPNetworkClient: networkClientset, + CloudNetworkClient: cloudNetworkClientset, + EgressQoSClient: egressqosClientset, + NetworkAttchDefClient: networkAttchmntDefClientset, + MultiNetworkPolicyClient: multiNetworkPolicyClientset, + EgressServiceClient: egressserviceClientset, + AdminPolicyRouteClient: adminPolicyBasedRouteClientset, + IPAMClaimsClient: ipamClaimsClientset, + UserDefinedNetworkClient: userDefinedNetworkClientSet, + RouteAdvertisementsClient: routeAdvertisementsClientset, }, nil } @@ -597,24 +615,47 @@ func ServiceInternalTrafficPolicyLocal(service *kapi.Service) bool { return service.Spec.InternalTrafficPolicy != nil && *service.Spec.InternalTrafficPolicy == kapi.ServiceInternalTrafficPolicyLocal } -// GetClusterSubnets returns the v4&v6 cluster subnets in a cluster separately -func GetClusterSubnets() ([]*net.IPNet, []*net.IPNet) { - var v4ClusterSubnets = []*net.IPNet{} - var v6ClusterSubnets = []*net.IPNet{} +// GetClusterSubnetsWithHostPrefix returns the v4 and v6 cluster subnets, along with their host prefix, +// in two separate slices +func GetClusterSubnetsWithHostPrefix() ([]config.CIDRNetworkEntry, []config.CIDRNetworkEntry) { + var v4ClusterSubnets = []config.CIDRNetworkEntry{} + var v6ClusterSubnets = []config.CIDRNetworkEntry{} for _, clusterSubnet := range config.Default.ClusterSubnets { + clusterSubnet := clusterSubnet if !utilnet.IsIPv6CIDR(clusterSubnet.CIDR) { - v4ClusterSubnets = append(v4ClusterSubnets, clusterSubnet.CIDR) + v4ClusterSubnets = append(v4ClusterSubnets, clusterSubnet) } else { - v6ClusterSubnets = append(v6ClusterSubnets, clusterSubnet.CIDR) + v6ClusterSubnets = append(v6ClusterSubnets, clusterSubnet) } } return v4ClusterSubnets, v6ClusterSubnets } -// GetAllClusterSubnets returns all (v4&v6) cluster subnets in a cluster -func GetAllClusterSubnets() []*net.IPNet { - v4ClusterSubnets, v6ClusterSubnets := GetClusterSubnets() - return append(v4ClusterSubnets, v6ClusterSubnets...) 
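+// Note: the wrapper below drops the host-prefix information; callers that
+// still need it should use GetClusterSubnetsWithHostPrefix directly.
+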
+// GetClusterSubnets returns the v4 and v6 cluster subnets in two separate slices +func GetClusterSubnets() ([]*net.IPNet, []*net.IPNet) { + var v4ClusterSubnets = []*net.IPNet{} + var v6ClusterSubnets = []*net.IPNet{} + + v4ClusterSubnetsWithHostPrefix, v6ClusterSubnetsWithHostPrefix := GetClusterSubnetsWithHostPrefix() + + for _, entry := range v4ClusterSubnetsWithHostPrefix { + v4ClusterSubnets = append(v4ClusterSubnets, entry.CIDR) + } + + for _, entry := range v6ClusterSubnetsWithHostPrefix { + v6ClusterSubnets = append(v6ClusterSubnets, entry.CIDR) + } + + return v4ClusterSubnets, v6ClusterSubnets +} + +// GetAllClusterSubnetsFromEntries extracts IPNet info from CIDRNetworkEntry(s) +func GetAllClusterSubnetsFromEntries(cidrNetEntries []config.CIDRNetworkEntry) []*net.IPNet { + subnets := make([]*net.IPNet, 0, len(cidrNetEntries)) + for _, entry := range cidrNetEntries { + subnets = append(subnets, entry.CIDR) + } + return subnets } // GetNodePrimaryIP extracts the primary IP address from the node status in the API @@ -683,41 +724,6 @@ func EventRecorder(kubeClient kubernetes.Interface) record.EventRecorder { return recorder } -// UseEndpointSlices detect if Endpoints Slices are enabled in the cluster -func UseEndpointSlices(kubeClient kubernetes.Interface) bool { - if _, err := kubeClient.Discovery().ServerResourcesForGroupVersion(discovery.SchemeGroupVersion.String()); err == nil { - klog.V(2).Infof("Kubernetes Endpoint Slices enabled on the cluster: %s", discovery.SchemeGroupVersion.String()) - return true - } - return false -} - -type K8sObject interface { - metav1.Object - k8sruntime.Object -} - -func ExternalIDsForObject(obj K8sObject) map[string]string { - gk := obj.GetObjectKind().GroupVersionKind().GroupKind() - nsn := k8stypes.NamespacedName{ - Namespace: obj.GetNamespace(), - Name: obj.GetName(), - } - - if gk.String() == "" { - kinds, _, err := scheme.Scheme.ObjectKinds(obj) - if err != nil || len(kinds) == 0 || len(kinds) > 1 { - klog.Warningf("Object %v either has no GroupVersionKind or has an ambiguous GroupVersionKind: %#v, err", obj, err) - } - gk = kinds[0].GroupKind() - } - - return map[string]string{ - types.LoadBalancerOwnerExternalID: nsn.String(), - types.LoadBalancerKindExternalID: gk.String(), - } -} - // IsEndpointReady takes as input an endpoint from an endpoint slice and returns true if the endpoint is // to be considered ready. 
Considering as ready an endpoint with Conditions.Ready==nil // as per doc: "In most cases consumers should interpret this unknown state as ready" diff --git a/go-controller/pkg/util/kube_test.go b/go-controller/pkg/util/kube_test.go index 40504a9681..68705330ab 100644 --- a/go-controller/pkg/util/kube_test.go +++ b/go-controller/pkg/util/kube_test.go @@ -8,8 +8,7 @@ import ( "testing" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" - kube_test "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + kubetest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" "github.com/stretchr/testify/assert" kapi "k8s.io/api/core/v1" @@ -457,39 +456,6 @@ func TestPodScheduled(t *testing.T) { } } -func TestExternalIDsForObject(t *testing.T) { - assert.Equal(t, - ExternalIDsForObject(&v1.Service{ - TypeMeta: metav1.TypeMeta{ - Kind: "Service", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "svc-ab23", - Namespace: "ns", - Labels: map[string]string{discovery.LabelServiceName: "svc"}, - }, - }), - map[string]string{ - types.LoadBalancerKindExternalID: "Service", - types.LoadBalancerOwnerExternalID: "ns/svc-ab23", - }) - - assert.Equal(t, - ExternalIDsForObject(&v1.Service{ - // also handle no TypeMeta, which can happen. - ObjectMeta: metav1.ObjectMeta{ - Name: "svc-ab23", - Namespace: "ns", - Labels: map[string]string{discovery.LabelServiceName: "svc"}, - }, - }), - map[string]string{ - types.LoadBalancerKindExternalID: "Service", - types.LoadBalancerOwnerExternalID: "ns/svc-ab23", - }) -} - var ( testNode string = "testNode" otherNode = "otherNode" @@ -697,9 +663,9 @@ func TestGetEligibleEndpointAddresses(t *testing.T) { { "Get all eligible endpoints from an endpointslice with all ready endpoints, one of which is on a different node", []discovery.Endpoint{ - kube_test.MakeReadyEndpoint(testNode, ep1Address), - kube_test.MakeReadyEndpoint(testNode, ep2Address), - kube_test.MakeReadyEndpoint(otherNode, ep3Address), + kubetest.MakeReadyEndpoint(testNode, ep1Address), + kubetest.MakeReadyEndpoint(testNode, ep2Address), + kubetest.MakeReadyEndpoint(otherNode, ep3Address), }, "", // get all endpoints []string{ep1Address, ep2Address, ep3Address}, @@ -707,9 +673,9 @@ func TestGetEligibleEndpointAddresses(t *testing.T) { { "Get all eligible local endpoints from an endpointslice with all ready endpoints, one of which is on a different node", []discovery.Endpoint{ - kube_test.MakeReadyEndpoint(testNode, ep1Address), - kube_test.MakeReadyEndpoint(testNode, ep2Address), - kube_test.MakeReadyEndpoint(otherNode, ep3Address), + kubetest.MakeReadyEndpoint(testNode, ep1Address), + kubetest.MakeReadyEndpoint(testNode, ep2Address), + kubetest.MakeReadyEndpoint(otherNode, ep3Address), }, testNode, []string{ep1Address, ep2Address}, @@ -717,9 +683,9 @@ func TestGetEligibleEndpointAddresses(t *testing.T) { { "Get all eligible local endpoints from an endpointslice with all ready endpoints, all of which are on another node", []discovery.Endpoint{ - kube_test.MakeReadyEndpoint(otherNode, ep1Address), - kube_test.MakeReadyEndpoint(otherNode, ep2Address), - kube_test.MakeReadyEndpoint(otherNode, ep3Address), + kubetest.MakeReadyEndpoint(otherNode, ep1Address), + kubetest.MakeReadyEndpoint(otherNode, ep2Address), + kubetest.MakeReadyEndpoint(otherNode, ep3Address), }, testNode, []string{}, @@ -727,9 +693,9 @@ func TestGetEligibleEndpointAddresses(t *testing.T) { { "Get all eligible endpoints from an endpointslice with all 
non-ready, serving, terminating endpoints, one of which is on a different node", []discovery.Endpoint{ - kube_test.MakeTerminatingServingEndpoint(testNode, ep1Address), - kube_test.MakeTerminatingServingEndpoint(testNode, ep2Address), - kube_test.MakeTerminatingServingEndpoint(otherNode, ep3Address), + kubetest.MakeTerminatingServingEndpoint(testNode, ep1Address), + kubetest.MakeTerminatingServingEndpoint(testNode, ep2Address), + kubetest.MakeTerminatingServingEndpoint(otherNode, ep3Address), }, "", []string{ep1Address, ep2Address, ep3Address}, // with no ready endpoints, we fallback to terminating serving endpoints @@ -737,9 +703,9 @@ func TestGetEligibleEndpointAddresses(t *testing.T) { { "Get all eligible local endpoints from an endpointslice with all non-ready, serving, terminating endpoints, one of which is on a different node", []discovery.Endpoint{ - kube_test.MakeTerminatingServingEndpoint(testNode, ep1Address), - kube_test.MakeTerminatingServingEndpoint(testNode, ep2Address), - kube_test.MakeTerminatingServingEndpoint(otherNode, ep3Address), + kubetest.MakeTerminatingServingEndpoint(testNode, ep1Address), + kubetest.MakeTerminatingServingEndpoint(testNode, ep2Address), + kubetest.MakeTerminatingServingEndpoint(otherNode, ep3Address), }, testNode, []string{ep1Address, ep2Address}, // with no ready endpoints, we fallback to terminating serving endpoints @@ -747,9 +713,9 @@ func TestGetEligibleEndpointAddresses(t *testing.T) { { "Get all eligible local endpoints from an endpointslice with all non-ready, serving, terminating endpoints, all of which are on a different node", []discovery.Endpoint{ - kube_test.MakeTerminatingServingEndpoint(otherNode, ep1Address), - kube_test.MakeTerminatingServingEndpoint(otherNode, ep2Address), - kube_test.MakeTerminatingServingEndpoint(otherNode, ep3Address), + kubetest.MakeTerminatingServingEndpoint(otherNode, ep1Address), + kubetest.MakeTerminatingServingEndpoint(otherNode, ep2Address), + kubetest.MakeTerminatingServingEndpoint(otherNode, ep3Address), }, testNode, []string{}, @@ -757,9 +723,9 @@ func TestGetEligibleEndpointAddresses(t *testing.T) { { "Get all eligible endpoints from an endpointslice with all non-ready, non-serving, terminating endpoints, one of which is on a different node", []discovery.Endpoint{ - kube_test.MakeTerminatingNonServingEndpoint(testNode, ep1Address), - kube_test.MakeTerminatingNonServingEndpoint(testNode, ep2Address), - kube_test.MakeTerminatingNonServingEndpoint(otherNode, ep3Address), + kubetest.MakeTerminatingNonServingEndpoint(testNode, ep1Address), + kubetest.MakeTerminatingNonServingEndpoint(testNode, ep2Address), + kubetest.MakeTerminatingNonServingEndpoint(otherNode, ep3Address), }, "", []string{}, @@ -767,9 +733,9 @@ func TestGetEligibleEndpointAddresses(t *testing.T) { { "Get all eligible local endpoints from an endpointslice with all non-ready, non-serving, terminating endpoints, one of which is on a different node", []discovery.Endpoint{ - kube_test.MakeTerminatingNonServingEndpoint(testNode, ep1Address), - kube_test.MakeTerminatingNonServingEndpoint(testNode, ep2Address), - kube_test.MakeTerminatingNonServingEndpoint(otherNode, ep3Address), + kubetest.MakeTerminatingNonServingEndpoint(testNode, ep1Address), + kubetest.MakeTerminatingNonServingEndpoint(testNode, ep2Address), + kubetest.MakeTerminatingNonServingEndpoint(otherNode, ep3Address), }, testNode, []string{}, @@ -777,9 +743,9 @@ func TestGetEligibleEndpointAddresses(t *testing.T) { { "Get all eligible local endpoints from an endpointslice with 
endpoints showing a mix of status conditions, one of which is on a different node", []discovery.Endpoint{ - kube_test.MakeReadyEndpoint(testNode, ep1Address), - kube_test.MakeTerminatingServingEndpoint(testNode, ep2Address), - kube_test.MakeTerminatingNonServingEndpoint(otherNode, ep3Address), + kubetest.MakeReadyEndpoint(testNode, ep1Address), + kubetest.MakeTerminatingServingEndpoint(testNode, ep2Address), + kubetest.MakeTerminatingNonServingEndpoint(otherNode, ep3Address), }, testNode, []string{ep1Address}, // only the ready endpoint is included @@ -787,9 +753,9 @@ func TestGetEligibleEndpointAddresses(t *testing.T) { { "Get all eligible local endpoints from an endpointslice with endpoints showing a mix of status conditions, all of which are on a different node", []discovery.Endpoint{ - kube_test.MakeReadyEndpoint(otherNode, ep1Address), - kube_test.MakeTerminatingServingEndpoint(otherNode, ep2Address), - kube_test.MakeTerminatingNonServingEndpoint(otherNode, ep3Address), + kubetest.MakeReadyEndpoint(otherNode, ep1Address), + kubetest.MakeTerminatingServingEndpoint(otherNode, ep2Address), + kubetest.MakeTerminatingNonServingEndpoint(otherNode, ep3Address), }, testNode, []string{}, @@ -797,9 +763,9 @@ func TestGetEligibleEndpointAddresses(t *testing.T) { { "Get all eligible endpoints from an endpointslice where all local endpoints are serving and terminating and a remote endpoint is ready", []discovery.Endpoint{ - kube_test.MakeTerminatingServingEndpoint(testNode, ep1Address), - kube_test.MakeTerminatingServingEndpoint(testNode, ep2Address), - kube_test.MakeReadyEndpoint(otherNode, ep3Address), + kubetest.MakeTerminatingServingEndpoint(testNode, ep1Address), + kubetest.MakeTerminatingServingEndpoint(testNode, ep2Address), + kubetest.MakeReadyEndpoint(otherNode, ep3Address), }, "", []string{ep3Address}, // fallback to serving&terminating should apply @@ -807,9 +773,9 @@ func TestGetEligibleEndpointAddresses(t *testing.T) { { "Get all eligible local endpoints from an endpointslice where all local endpoints are serving and terminating and a remote endpoint is ready", []discovery.Endpoint{ - kube_test.MakeTerminatingServingEndpoint(testNode, ep1Address), - kube_test.MakeTerminatingServingEndpoint(testNode, ep2Address), - kube_test.MakeReadyEndpoint(otherNode, ep3Address), + kubetest.MakeTerminatingServingEndpoint(testNode, ep1Address), + kubetest.MakeTerminatingServingEndpoint(testNode, ep2Address), + kubetest.MakeReadyEndpoint(otherNode, ep3Address), }, testNode, []string{ep1Address, ep2Address}, // fallback to serving&terminating should apply @@ -817,9 +783,9 @@ func TestGetEligibleEndpointAddresses(t *testing.T) { { "Get all eligible endpoints from an endpointslice where all local endpoints are terminating and not serving and a remote endpoint is ready", []discovery.Endpoint{ - kube_test.MakeTerminatingNonServingEndpoint(testNode, ep1Address), - kube_test.MakeTerminatingNonServingEndpoint(testNode, ep2Address), - kube_test.MakeReadyEndpoint(otherNode, ep3Address), + kubetest.MakeTerminatingNonServingEndpoint(testNode, ep1Address), + kubetest.MakeTerminatingNonServingEndpoint(testNode, ep2Address), + kubetest.MakeReadyEndpoint(otherNode, ep3Address), }, "", []string{ep3Address}, @@ -827,9 +793,9 @@ func TestGetEligibleEndpointAddresses(t *testing.T) { { "Get all eligible local endpoints from an endpointslice where all local endpoints are terminating and not serving and a remote endpoint is ready", []discovery.Endpoint{ - kube_test.MakeTerminatingNonServingEndpoint(testNode, ep1Address), - 
kube_test.MakeTerminatingNonServingEndpoint(testNode, ep2Address), - kube_test.MakeReadyEndpoint(otherNode, ep3Address), + kubetest.MakeTerminatingNonServingEndpoint(testNode, ep1Address), + kubetest.MakeTerminatingNonServingEndpoint(testNode, ep2Address), + kubetest.MakeReadyEndpoint(otherNode, ep3Address), }, testNode, []string{}, diff --git a/go-controller/pkg/util/multi_network.go b/go-controller/pkg/util/multi_network.go index 7baaefce8e..2af60e6019 100644 --- a/go-controller/pkg/util/multi_network.go +++ b/go-controller/pkg/util/multi_network.go @@ -15,6 +15,7 @@ import ( knet "k8s.io/utils/net" nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" @@ -42,6 +43,7 @@ type BasicNetInfo interface { JoinSubnets() []*net.IPNet Vlan() uint AllowsPersistentIPs() bool + PhysicalNetworkName() string // utility methods Equals(BasicNetInfo) bool @@ -55,6 +57,9 @@ type BasicNetInfo interface { GetNetworkScopedExtSwitchName(nodeName string) string GetNetworkScopedPatchPortName(bridgeID, nodeName string) string GetNetworkScopedExtPortName(bridgeID, nodeName string) string + GetNetworkScopedLoadBalancerName(lbName string) string + GetNetworkScopedLoadBalancerGroupName(lbGroupName string) string + GetNetworkScopedClusterSubnetSNATMatch(nodeName string) string } // NetInfo correlates which NADs refer to a network in addition to the basic @@ -138,6 +143,18 @@ func (nInfo *DefaultNetInfo) GetNetworkScopedExtPortName(bridgeID, nodeName stri return GetExtPortName(bridgeID, nInfo.GetNetworkScopedName(nodeName)) } +func (nInfo *DefaultNetInfo) GetNetworkScopedLoadBalancerName(lbName string) string { + return nInfo.GetNetworkScopedName(lbName) +} + +func (nInfo *DefaultNetInfo) GetNetworkScopedLoadBalancerGroupName(lbGroupName string) string { + return nInfo.GetNetworkScopedName(lbGroupName) +} + +func (nInfo *DefaultNetInfo) GetNetworkScopedClusterSubnetSNATMatch(nodeName string) string { + return "" +} + // GetNADs returns the NADs associated with the network, no op for default // network func (nInfo *DefaultNetInfo) GetNADs() []string { @@ -248,6 +265,11 @@ func (nInfo *DefaultNetInfo) AllowsPersistentIPs() bool { return false } +// PhysicalNetworkName has no impact on defaultNetConfInfo (localnet feature) +func (nInfo *DefaultNetInfo) PhysicalNetworkName() string { + return "" +} + // SecondaryNetInfo holds the network name information for secondary network if non-nil type secondaryNetInfo struct { netName string @@ -268,6 +290,8 @@ type secondaryNetInfo struct { // to be plumbed for this network sync.Mutex nadNames sets.Set[string] + + physicalNetworkName string } // GetNetworkName returns the network name @@ -318,6 +342,10 @@ func (nInfo *secondaryNetInfo) GetNetworkScopedGWRouterName(nodeName string) str } func (nInfo *secondaryNetInfo) GetNetworkScopedSwitchName(nodeName string) string { + // In Layer2Topology there is just one global switch + if nInfo.TopologyType() == types.Layer2Topology { + return fmt.Sprintf("%s%s", nInfo.getPrefix(), types.OVNLayer2Switch) + } return nInfo.GetNetworkScopedName(nodeName) } @@ -337,6 +365,21 @@ func (nInfo *secondaryNetInfo) GetNetworkScopedExtPortName(bridgeID, nodeName st return GetExtPortName(bridgeID, nInfo.GetNetworkScopedName(nodeName)) } +func (nInfo *secondaryNetInfo) 
GetNetworkScopedLoadBalancerName(lbName string) string { + return nInfo.GetNetworkScopedName(lbName) +} + +func (nInfo *secondaryNetInfo) GetNetworkScopedLoadBalancerGroupName(lbGroupName string) string { + return nInfo.GetNetworkScopedName(lbGroupName) +} + +func (nInfo *secondaryNetInfo) GetNetworkScopedClusterSubnetSNATMatch(nodeName string) string { + if nInfo.TopologyType() != types.Layer2Topology { + return "" + } + return fmt.Sprintf("outport == %q", types.GWRouterToExtSwitchPrefix+nInfo.GetNetworkScopedGWRouterName(nodeName)) +} + // getPrefix returns if the logical entities prefix for this network func (nInfo *secondaryNetInfo) getPrefix() string { return GetSecondaryNetworkPrefix(nInfo.netName) @@ -398,6 +441,11 @@ func (nInfo *secondaryNetInfo) AllowsPersistentIPs() bool { return nInfo.allowPersistentIPs } +// PhysicalNetworkName returns the user provided physical network name value +func (nInfo *secondaryNetInfo) PhysicalNetworkName() string { + return nInfo.physicalNetworkName +} + // IPMode returns the ipv4/ipv6 mode func (nInfo *secondaryNetInfo) IPMode() (bool, bool) { return nInfo.ipv4mode, nInfo.ipv6mode @@ -483,18 +531,19 @@ func (nInfo *secondaryNetInfo) copy() *secondaryNetInfo { // everything is immutable except the NADs c := &secondaryNetInfo{ - netName: nInfo.netName, - primaryNetwork: nInfo.primaryNetwork, - topology: nInfo.topology, - mtu: nInfo.mtu, - vlan: nInfo.vlan, - allowPersistentIPs: nInfo.allowPersistentIPs, - ipv4mode: nInfo.ipv4mode, - ipv6mode: nInfo.ipv6mode, - subnets: nInfo.subnets, - excludeSubnets: nInfo.excludeSubnets, - joinSubnets: nInfo.joinSubnets, - nadNames: nInfo.nadNames.Clone(), + netName: nInfo.netName, + primaryNetwork: nInfo.primaryNetwork, + topology: nInfo.topology, + mtu: nInfo.mtu, + vlan: nInfo.vlan, + allowPersistentIPs: nInfo.allowPersistentIPs, + ipv4mode: nInfo.ipv4mode, + ipv6mode: nInfo.ipv6mode, + subnets: nInfo.subnets, + excludeSubnets: nInfo.excludeSubnets, + joinSubnets: nInfo.joinSubnets, + nadNames: nInfo.nadNames.Clone(), + physicalNetworkName: nInfo.physicalNetworkName, } return c @@ -553,14 +602,15 @@ func newLocalnetNetConfInfo(netconf *ovncnitypes.NetConf) (NetInfo, error) { } ni := &secondaryNetInfo{ - netName: netconf.Name, - topology: types.LocalnetTopology, - subnets: subnets, - excludeSubnets: excludes, - mtu: netconf.MTU, - vlan: uint(netconf.VLANID), - allowPersistentIPs: netconf.AllowPersistentIPs, - nadNames: sets.Set[string]{}, + netName: netconf.Name, + topology: types.LocalnetTopology, + subnets: subnets, + excludeSubnets: excludes, + mtu: netconf.MTU, + vlan: uint(netconf.VLANID), + allowPersistentIPs: netconf.AllowPersistentIPs, + nadNames: sets.Set[string]{}, + physicalNetworkName: netconf.PhysicalNetworkName, } ni.ipv4mode, ni.ipv6mode = getIPMode(subnets) return ni, nil @@ -682,17 +732,32 @@ func NewNetInfo(netconf *ovncnitypes.NetConf) (NetInfo, error) { if netconf.Name == types.DefaultNetworkName { return &DefaultNetInfo{}, nil } + var ni NetInfo + var err error switch netconf.Topology { case types.Layer3Topology: - return newLayer3NetConfInfo(netconf) + ni, err = newLayer3NetConfInfo(netconf) case types.Layer2Topology: - return newLayer2NetConfInfo(netconf) + ni, err = newLayer2NetConfInfo(netconf) case types.LocalnetTopology: - return newLocalnetNetConfInfo(netconf) + ni, err = newLocalnetNetConfInfo(netconf) default: // other topology NAD can be supported later return nil, fmt.Errorf("topology %s not supported", netconf.Topology) } + if err != nil { + return nil, err + } + if 
ni.IsPrimaryNetwork() && ni.IsSecondary() { + ipv4Mode, ipv6Mode := ni.IPMode() + if ipv4Mode && !config.IPv4Mode { + return nil, fmt.Errorf("network %s is attempting to use ipv4 subnets but the cluster does not support ipv4", ni.GetNetworkName()) + } + if ipv6Mode && !config.IPv6Mode { + return nil, fmt.Errorf("network %s is attempting to use ipv6 subnets but the cluster does not support ipv6", ni.GetNetworkName()) + } + } + return ni, nil } // ParseNADInfo parses config in NAD spec and return a NetAttachDefInfo object for secondary networks @@ -761,6 +826,61 @@ func ValidateNetConf(nadName string, netconf *ovncnitypes.NetConf) error { return fmt.Errorf("the subnet attribute must be defined for layer2 primary user defined networks") } + if netconf.Topology != types.LocalnetTopology && netconf.Name != types.DefaultNetworkName { + if err := subnetOverlapCheck(netconf); err != nil { + return fmt.Errorf("invalid subnet configuration: %w", err) + } + } + + return nil +} + +// subnetOverlapCheck validates that the pod and join subnets mentioned in a net-attach-def with +// topology "layer2" or "layer3" do not overlap with the cluster subnets, service CIDRs, join subnet, +// or masquerade subnet. Subnets excluded in the net-attach-def are left out of the overlap check. +func subnetOverlapCheck(netconf *ovncnitypes.NetConf) error { + allSubnets := config.NewConfigSubnets() + for _, subnet := range config.Default.ClusterSubnets { + allSubnets.Append(config.ConfigSubnetCluster, subnet.CIDR) + } + for _, subnet := range config.Kubernetes.ServiceCIDRs { + allSubnets.Append(config.ConfigSubnetService, subnet) + } + _, v4JoinCIDR, _ := net.ParseCIDR(config.Gateway.V4JoinSubnet) + _, v6JoinCIDR, _ := net.ParseCIDR(config.Gateway.V6JoinSubnet) + + allSubnets.Append(config.ConfigSubnetJoin, v4JoinCIDR) + allSubnets.Append(config.ConfigSubnetJoin, v6JoinCIDR) + + _, v4MasqueradeCIDR, _ := net.ParseCIDR(config.Gateway.V4MasqueradeSubnet) + _, v6MasqueradeCIDR, _ := net.ParseCIDR(config.Gateway.V6MasqueradeSubnet) + + allSubnets.Append(config.ConfigSubnetMasquerade, v4MasqueradeCIDR) + allSubnets.Append(config.ConfigSubnetMasquerade, v6MasqueradeCIDR) + + ni, err := NewNetInfo(netconf) + if err != nil { + return fmt.Errorf("error while parsing subnets: %v", err) + } + for _, subnet := range ni.Subnets() { + allSubnets.Append(config.UserDefinedSubnets, subnet.CIDR) + } + + for _, subnet := range ni.JoinSubnets() { + allSubnets.Append(config.UserDefinedJoinSubnet, subnet) + } + if ni.ExcludeSubnets() != nil { + for i, configSubnet := range allSubnets.Subnets { + if IsContainedInAnyCIDR(configSubnet.Subnet, ni.ExcludeSubnets()...) { + allSubnets.Subnets = append(allSubnets.Subnets[:i], allSubnets.Subnets[i+1:]...)
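
Reviewer aside, not part of the patch: a minimal sketch of how the new overlap validation surfaces to a caller. It assumes the cluster defaults set up in TestSubnetOverlapCheck below (cluster subnet 10.128.0.0/14, join subnet 100.64.0.0/16) and reuses the applyNADDefaults test helper defined at the end of this test file.

```go
// Sketch: parsing a layer2 primary NAD whose pod subnet falls inside the
// cluster subnet now fails in ValidateNetConf via subnetOverlapCheck.
nad := applyNADDefaults(&nadv1.NetworkAttachmentDefinition{
	Spec: nadv1.NetworkAttachmentDefinitionSpec{
		Config: `{
			"name": "tenantred",
			"type": "ovn-k8s-cni-overlay",
			"topology": "layer2",
			"subnets": "10.129.0.0/16",
			"joinSubnet": "100.65.0.0/24",
			"primaryNetwork": true,
			"netAttachDefName": "ns1/nad1"
		}`,
	},
})
_, err := ParseNADInfo(nad)
// err: invalid subnet configuration: pod or join subnet overlaps with already
// configured internal subnets: ... user defined subnet "10.129.0.0/16"
// overlaps cluster subnet "10.128.0.0/14"
```
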
+ } + } + } + err = allSubnets.CheckForOverlaps() + if err != nil { + return fmt.Errorf("pod or join subnet overlaps with already configured internal subnets: %v", err) + } + return nil } @@ -855,7 +975,7 @@ func GetPodNADToNetworkMappingWithActiveNetwork(pod *kapi.Pod, nInfo NetInfo, ac // Add the active network to the NSE map if it is configured activeNetworkNADs := activeNetwork.GetNADs() if len(activeNetworkNADs) < 1 { - return false, nil, fmt.Errorf("missing NADs at active network '%s' for namesapce '%s'", activeNetwork.GetNetworkName(), pod.Namespace) + return false, nil, fmt.Errorf("missing NADs at active network %q for namespace %q", activeNetwork.GetNetworkName(), pod.Namespace) } activeNetworkNADKey := strings.Split(activeNetworkNADs[0], "/") if len(networkSelections) == 0 { @@ -866,8 +986,16 @@ func GetPodNADToNetworkMappingWithActiveNetwork(pod *kapi.Pod, nInfo NetInfo, ac Name: activeNetworkNADKey[1], } + if nInfo.IsPrimaryNetwork() && AllowsPersistentIPs(nInfo) { + ipamClaimName, wasPersistentIPRequested := pod.Annotations[OvnUDNIPAMClaimName] + if wasPersistentIPRequested { + networkSelections[activeNetworkNADs[0]].IPAMClaimReference = ipamClaimName + } + } + return true, networkSelections, nil } + func IsMultiNetworkPoliciesSupportEnabled() bool { return config.OVNKubernetesFeature.EnableMultiNetwork && config.OVNKubernetesFeature.EnableMultiNetworkPolicy } @@ -876,6 +1004,12 @@ func IsNetworkSegmentationSupportEnabled() bool { return config.OVNKubernetesFeature.EnableMultiNetwork && config.OVNKubernetesFeature.EnableNetworkSegmentation } +func IsRouteAdvertisementsEnabled() bool { + // for now, we require multi-network to be enabled because we rely on NADs, + // even for the default network + return config.OVNKubernetesFeature.EnableMultiNetwork && config.OVNKubernetesFeature.EnableRouteAdvertisements +} + func DoesNetworkRequireIPAM(netInfo NetInfo) bool { return !((netInfo.TopologyType() == types.Layer2Topology || netInfo.TopologyType() == types.LocalnetTopology) && len(netInfo.Subnets()) == 0) } @@ -884,3 +1018,17 @@ func DoesNetworkRequireTunnelIDs(netInfo NetInfo) bool { // Layer2Topology with IC require that we allocate tunnel IDs for each pod return netInfo.TopologyType() == types.Layer2Topology && config.OVNKubernetesFeature.EnableInterconnect } + +func AllowsPersistentIPs(netInfo NetInfo) bool { + switch { + case netInfo.IsPrimaryNetwork(): + return netInfo.TopologyType() == types.Layer2Topology && netInfo.AllowsPersistentIPs() + + case netInfo.IsSecondary(): + return (netInfo.TopologyType() == types.Layer2Topology || netInfo.TopologyType() == types.LocalnetTopology) && + netInfo.AllowsPersistentIPs() + + default: + return false + } +} diff --git a/go-controller/pkg/util/multi_network_test.go b/go-controller/pkg/util/multi_network_test.go index 039f145d1b..a098de3e5d 100644 --- a/go-controller/pkg/util/multi_network_test.go +++ b/go-controller/pkg/util/multi_network_test.go @@ -845,6 +845,416 @@ func TestGetPodNADToNetworkMapping(t *testing.T) { } } +func TestGetPodNADToNetworkMappingWithActiveNetwork(t *testing.T) { + const ( + attachmentName = "attachment1" + namespaceName = "ns1" + networkName = "l3-network" + ) + + type testConfig struct { + desc string + inputNamespace string + inputNetConf *ovncnitypes.NetConf + inputPrimaryUDNConfig *ovncnitypes.NetConf + inputPodAnnotations map[string]string + expectedError error + expectedIsAttachmentRequested bool + expectedNetworkSelectionElements map[string]*nadv1.NetworkSelectionElement + } + + tests := []testConfig{ 
+ { + desc: "there isn't a primary UDN", + inputNetConf: &ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{Name: networkName}, + Topology: ovntypes.Layer3Topology, + NADName: GetNADName(namespaceName, attachmentName), + }, + inputPodAnnotations: map[string]string{ + nadv1.NetworkAttachmentAnnot: GetNADName(namespaceName, attachmentName), + }, + expectedIsAttachmentRequested: true, + expectedNetworkSelectionElements: map[string]*nadv1.NetworkSelectionElement{ + "ns1/attachment1": { + Name: "attachment1", + Namespace: "ns1", + }, + }, + }, + { + desc: "the netinfo is different from the active network", + inputNetConf: &ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{Name: networkName}, + Topology: ovntypes.Layer3Topology, + NADName: GetNADName(namespaceName, attachmentName), + }, + inputPrimaryUDNConfig: &ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{Name: "another-network"}, + Topology: ovntypes.Layer3Topology, + NADName: GetNADName(namespaceName, "another-network"), + }, + inputPodAnnotations: map[string]string{ + nadv1.NetworkAttachmentAnnot: GetNADName(namespaceName, "another-network"), + }, + expectedIsAttachmentRequested: false, + }, + { + desc: "the network configuration for a primary layer2 UDN features allow persistent IPs but the pod does not request it", + inputNetConf: &ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{Name: networkName}, + Topology: ovntypes.Layer2Topology, + NADName: GetNADName(namespaceName, attachmentName), + Role: ovntypes.NetworkRolePrimary, + AllowPersistentIPs: true, + }, + inputPrimaryUDNConfig: &ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{Name: networkName}, + Topology: ovntypes.Layer2Topology, + NADName: GetNADName(namespaceName, attachmentName), + Role: ovntypes.NetworkRolePrimary, + AllowPersistentIPs: true, + }, + inputPodAnnotations: map[string]string{ + nadv1.NetworkAttachmentAnnot: GetNADName(namespaceName, "another-network"), + }, + expectedIsAttachmentRequested: true, + expectedNetworkSelectionElements: map[string]*nadv1.NetworkSelectionElement{ + "ns1/attachment1": { + Name: "attachment1", + Namespace: "ns1", + }, + }, + }, + { + desc: "the network configuration for a primary layer2 UDN features allow persistent IPs, and the pod requests it", + inputNetConf: &ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{Name: networkName}, + Topology: ovntypes.Layer2Topology, + NADName: GetNADName(namespaceName, attachmentName), + Role: ovntypes.NetworkRolePrimary, + AllowPersistentIPs: true, + }, + inputPrimaryUDNConfig: &ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{Name: networkName}, + Topology: ovntypes.Layer2Topology, + NADName: GetNADName(namespaceName, attachmentName), + Role: ovntypes.NetworkRolePrimary, + AllowPersistentIPs: true, + }, + inputPodAnnotations: map[string]string{ + nadv1.NetworkAttachmentAnnot: GetNADName(namespaceName, "another-network"), + OvnUDNIPAMClaimName: "the-one-to-the-left-of-the-pony", + }, + expectedIsAttachmentRequested: true, + expectedNetworkSelectionElements: map[string]*nadv1.NetworkSelectionElement{ + "ns1/attachment1": { + Name: "attachment1", + Namespace: "ns1", + IPAMClaimReference: "the-one-to-the-left-of-the-pony", + }, + }, + }, + { + desc: "the network configuration for a secondary layer2 UDN features allow persistent IPs and the pod requests it", + inputNetConf: &ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{Name: networkName}, + Topology: ovntypes.Layer2Topology, + NADName: GetNADName(namespaceName, attachmentName), + Role: ovntypes.NetworkRoleSecondary, + AllowPersistentIPs: true, + }, + 
inputPrimaryUDNConfig: &ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{Name: networkName}, + Topology: ovntypes.Layer2Topology, + NADName: GetNADName(namespaceName, attachmentName), + Role: ovntypes.NetworkRoleSecondary, + AllowPersistentIPs: true, + }, + inputPodAnnotations: map[string]string{ + nadv1.NetworkAttachmentAnnot: GetNADName(namespaceName, "another-network"), + }, + expectedIsAttachmentRequested: true, + expectedNetworkSelectionElements: map[string]*nadv1.NetworkSelectionElement{ + "ns1/attachment1": { + Name: "attachment1", + Namespace: "ns1", + }, + }, + }, + { + desc: "the network configuration for a primary layer3 UDN features allow persistent IPs and the pod requests it", + inputNetConf: &ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{Name: networkName}, + Topology: ovntypes.Layer3Topology, + NADName: GetNADName(namespaceName, attachmentName), + Role: ovntypes.NetworkRolePrimary, + AllowPersistentIPs: true, + }, + inputPrimaryUDNConfig: &ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{Name: networkName}, + Topology: ovntypes.Layer3Topology, + NADName: GetNADName(namespaceName, attachmentName), + Role: ovntypes.NetworkRolePrimary, + AllowPersistentIPs: true, + }, + inputPodAnnotations: map[string]string{ + nadv1.NetworkAttachmentAnnot: GetNADName(namespaceName, "another-network"), + OvnUDNIPAMClaimName: "the-one-to-the-left-of-the-pony", + }, + expectedIsAttachmentRequested: true, + expectedNetworkSelectionElements: map[string]*nadv1.NetworkSelectionElement{ + "ns1/attachment1": { + Name: "attachment1", + Namespace: "ns1", + }, + }, + }, + } + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + g := gomega.NewWithT(t) + netInfo, err := NewNetInfo(test.inputNetConf) + g.Expect(err).To(gomega.BeNil()) + if test.inputNetConf.NADName != "" { + netInfo.AddNADs(test.inputNetConf.NADName) + } + + var primaryUDNNetInfo NetInfo + if test.inputPrimaryUDNConfig != nil { + primaryUDNNetInfo, err = NewNetInfo(test.inputPrimaryUDNConfig) + g.Expect(err).To(gomega.BeNil()) + if test.inputPrimaryUDNConfig.NADName != "" { + primaryUDNNetInfo.AddNADs(test.inputPrimaryUDNConfig.NADName) + } + } + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: test.inputNamespace, + Annotations: test.inputPodAnnotations, + }, + } + + isAttachmentRequested, networkSelectionElements, err := GetPodNADToNetworkMappingWithActiveNetwork( + pod, + netInfo, + primaryUDNNetInfo, + ) + + if err != nil { + g.Expect(err).To(gomega.MatchError(test.expectedError)) + } + g.Expect(isAttachmentRequested).To(gomega.Equal(test.expectedIsAttachmentRequested)) + g.Expect(networkSelectionElements).To(gomega.Equal(test.expectedNetworkSelectionElements)) + }) + } +} + +func TestSubnetOverlapCheck(t *testing.T) { + _, cidr4, _ := net.ParseCIDR("10.128.0.0/14") + _, cidr6, _ := net.ParseCIDR("fe00::/16") + _, svcCidr4, _ := net.ParseCIDR("172.30.0.0/16") + _, svcCidr6, _ := net.ParseCIDR("fe01::/16") + config.Default.ClusterSubnets = []config.CIDRNetworkEntry{{cidr4, 24}, {cidr6, 64}} + config.Kubernetes.ServiceCIDRs = []*net.IPNet{svcCidr4, svcCidr6} + config.Gateway.V4MasqueradeSubnet = "169.254.169.0/29" + config.Gateway.V6MasqueradeSubnet = "fd69::/125" + config.Gateway.V4JoinSubnet = "100.64.0.0/16" + config.Gateway.V6JoinSubnet = "fd98::/64" + type testConfig struct { + desc string + inputNetAttachDefConfigSpec string + expectedError error + } + + tests := []testConfig{ + { + desc: "return error when IPv4 POD subnet in net-attach-def overlaps other subnets", + 
inputNetAttachDefConfigSpec: ` + { + "name": "tenantred", + "type": "ovn-k8s-cni-overlay", + "topology": "layer2", + "subnets": "10.129.0.0/16", + "joinSubnet": "100.65.0.0/24", + "primaryNetwork": true, + "netAttachDefName": "ns1/nad1" + } + `, + expectedError: fmt.Errorf("invalid subnet configuration: pod or join subnet overlaps with already configured internal subnets: " + + "illegal network configuration: user defined subnet \"10.129.0.0/16\" overlaps cluster subnet \"10.128.0.0/14\""), + }, + { + desc: "return error when IPv4 join subnet in net-attach-def overlaps other subnets", + inputNetAttachDefConfigSpec: ` + { + "name": "tenantred", + "type": "ovn-k8s-cni-overlay", + "topology": "layer2", + "subnets": "192.168.0.0/16", + "joinSubnet": "100.64.0.0/24", + "primaryNetwork": true, + "netAttachDefName": "ns1/nad1" + } + `, + expectedError: fmt.Errorf("invalid subnet configuration: pod or join subnet overlaps with already configured internal subnets: " + + "illegal network configuration: user defined join subnet \"100.64.0.0/24\" overlaps built-in join subnet \"100.64.0.0/16\""), + }, + { + desc: "return error when IPv6 POD subnet in net-attach-def overlaps other subnets", + inputNetAttachDefConfigSpec: ` + { + "name": "tenantred", + "type": "ovn-k8s-cni-overlay", + "topology": "layer2", + "subnets": "192.168.0.0/16,fe01::/24", + "joinSubnet": "100.65.0.0/24", + "primaryNetwork": true, + "netAttachDefName": "ns1/nad1" + } + `, + expectedError: fmt.Errorf("invalid subnet configuration: pod or join subnet overlaps with already configured internal subnets: " + + "illegal network configuration: user defined subnet \"fe01::/24\" overlaps service subnet \"fe01::/16\""), + }, + { + desc: "return error when IPv6 join subnet in net-attach-def overlaps other subnets", + inputNetAttachDefConfigSpec: ` + { + "name": "tenantred", + "type": "ovn-k8s-cni-overlay", + "topology": "layer2", + "subnets": "192.168.0.0/16,fe02::/24", + "joinSubnet": "100.65.0.0/24,fd69::/112", + "primaryNetwork": true, + "netAttachDefName": "ns1/nad1" + } + `, + expectedError: fmt.Errorf("invalid subnet configuration: pod or join subnet overlaps with already configured internal subnets: " + + "illegal network configuration: user defined join subnet \"fd69::/112\" overlaps masquerade subnet \"fd69::/125\""), + }, + { + desc: "excluded subnet should not be considered for overlap check", + inputNetAttachDefConfigSpec: ` + { + "name": "tenantred", + "type": "ovn-k8s-cni-overlay", + "topology": "layer2", + "subnets": "10.0.0.0/8", + "excludeSubnets": "10.128.0.0/14", + "joinSubnet": "100.65.0.0/24", + "primaryNetwork": true, + "netAttachDefName": "ns1/nad1" + } + `, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + g := gomega.NewWithT(t) + networkAttachmentDefinition := applyNADDefaults( + &nadv1.NetworkAttachmentDefinition{ + Spec: nadv1.NetworkAttachmentDefinitionSpec{ + Config: test.inputNetAttachDefConfigSpec, + }, + }) + if test.expectedError != nil { + _, err := ParseNADInfo(networkAttachmentDefinition) + g.Expect(err).To(gomega.MatchError(test.expectedError.Error())) + } else { + _, err := ParseNADInfo(networkAttachmentDefinition) + g.Expect(err).NotTo(gomega.HaveOccurred()) + } + }) + } +} + +func TestNewNetInfo(t *testing.T) { + type testConfig struct { + desc string + subnets string + ipv4Cluster bool + ipv6Cluster bool + expectedError error + } + + tests := []testConfig{ + { + desc: "ipv4 primary network in ipv4 cluster", + subnets: "192.168.200.0/16", + ipv4Cluster: true, + }, + { 
+ desc: "ipv4 primary network in ipv6 cluster", + subnets: "192.168.200.0/16", + ipv6Cluster: true, + expectedError: fmt.Errorf("network l3-network is attempting to use ipv4 subnets but the cluster does not support ipv4"), + }, + { + desc: "ipv4 primary network in dualstack cluster", + subnets: "192.168.200.0/16", + ipv4Cluster: true, + ipv6Cluster: true, + }, + { + desc: "ipv6 primary network in ipv4 cluster", + subnets: "fda6::/48", + ipv4Cluster: true, + expectedError: fmt.Errorf("network l3-network is attempting to use ipv6 subnets but the cluster does not support ipv6"), + }, + { + desc: "ipv6 primary network in ipv6 cluster", + subnets: "fda6::/48", + ipv6Cluster: true, + }, + { + desc: "ipv6 primary network in dualstack cluster", + subnets: "fda6::/48", + ipv4Cluster: true, + ipv6Cluster: true, + }, + { + desc: "dualstack primary network in ipv4 cluster", + subnets: "192.168.200.0/16, fda6::/48", + ipv4Cluster: true, + expectedError: fmt.Errorf("network l3-network is attempting to use ipv6 subnets but the cluster does not support ipv6"), + }, + { + desc: "dualstack primary network in ipv6 cluster", + subnets: "192.168.200.0/16, fda6::/48", + ipv6Cluster: true, + expectedError: fmt.Errorf("network l3-network is attempting to use ipv4 subnets but the cluster does not support ipv4"), + }, + { + desc: "dualstack primary network in dualstack cluster", + subnets: "192.168.200.0/16, fda6::/48", + ipv4Cluster: true, + ipv6Cluster: true, + }, + } + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + inputNetConf := &ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{Name: "l3-network"}, + Topology: ovntypes.Layer3Topology, + Role: ovntypes.NetworkRolePrimary, + Subnets: test.subnets, + } + config.IPv4Mode = test.ipv4Cluster + config.IPv6Mode = test.ipv6Cluster + g := gomega.NewWithT(t) + _, err := NewNetInfo(inputNetConf) + if test.expectedError == nil { + g.Expect(err).To(gomega.BeNil()) + } else { + g.Expect(err).To(gomega.MatchError(test.expectedError.Error())) + } + }) + } +} + func applyNADDefaults(nad *nadv1.NetworkAttachmentDefinition) *nadv1.NetworkAttachmentDefinition { const ( name = "nad1" diff --git a/go-controller/pkg/util/net_linux.go b/go-controller/pkg/util/net_linux.go index 0a2cd53f3f..cbe71abb8c 100644 --- a/go-controller/pkg/util/net_linux.go +++ b/go-controller/pkg/util/net_linux.go @@ -249,6 +249,67 @@ func LinkAddrFlush(link netlink.Link) error { return nil } +// SyncAddresses ensures the link has the provided addresses only +// Ignores IPv6 LLA +// addresses should all be of the same family +func SyncAddresses(link netlink.Link, addresses []*net.IPNet) error { + if len(addresses) == 0 { + return nil + } + firstFamily := getFamily(addresses[0].IP) + for _, addr := range addresses[1:] { + if getFamily(addr.IP) != firstFamily { + return fmt.Errorf("all addresses are not the same family: %#v", addresses) + } + } + + addrs, err := netLinkOps.AddrList(link, firstFamily) + if err != nil { + return fmt.Errorf("failed to list addresses for the link %s: %v", + link.Attrs().Name, err) + } + + // desired addresses - true if already exist + matched := map[*net.IPNet]bool{} + for _, desiredAddr := range addresses { + matched[desiredAddr] = false + } + + // cycle through found addresses + for _, addr := range addrs { + if utilnet.IsIPv6(addr.IP) && addr.IP.IsLinkLocalUnicast() { + continue + } + + exists := false + for _, desiredAddr := range addresses { + if addr.IPNet.String() == desiredAddr.String() { + matched[desiredAddr] = true + exists = true + break + } + } 
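
Reviewer aside, not part of the patch: before the function's removal/addition passes below, a hedged usage sketch of SyncAddresses as a caller might drive it; the bridge name and addresses are illustrative.

```go
package main

import (
	"net"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util"
	"github.com/vishvananda/netlink"
)

// mustCIDR parses "ip/len" keeping the host address; for the sketch only.
func mustCIDR(s string) *net.IPNet {
	ip, ipNet, err := net.ParseCIDR(s)
	if err != nil {
		panic(err)
	}
	ipNet.IP = ip
	return ipNet
}

func main() {
	link, err := netlink.LinkByName("breth0") // hypothetical external bridge
	if err != nil {
		panic(err)
	}
	// SyncAddresses removes addresses not in the list (IPv6 link-locals are
	// left alone) and adds missing ones; all entries must share one family.
	if err := util.SyncAddresses(link, []*net.IPNet{
		mustCIDR("192.168.1.15/24"),
		mustCIDR("10.10.10.10/24"), // e.g. a bridge-assigned egress IP
	}); err != nil {
		panic(err)
	}
}
```
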
+ + // found address is not in desired list, remove it + if !exists { + if err := LinkAddrDel(link, addr.IPNet); err != nil { + return err + } + } + } + + // cycle through leftover addresses to add + for addr, alreadyExists := range matched { + if !alreadyExists { + if err := LinkAddrAdd(link, addr, 0, 0, 0); err != nil { + return err + } + } + } + + return nil +} + // LinkAddrExist returns true if the given address is present on the link func LinkAddrExist(link netlink.Link, address *net.IPNet) (bool, error) { addrs, err := netLinkOps.AddrList(link, getFamily(address.IP)) @@ -415,10 +476,10 @@ func LinkRouteGetFilteredRoute(routeFilter *netlink.Route, filterMask uint64) (* return &routes[0], nil } -// LinkRouteExists checks for existence of routes for the given subnet through gwIPStr -func LinkRouteExists(link netlink.Link, gwIP net.IP, subnet *net.IPNet) (bool, error) { +// LinkRouteGetByDstAndGw returns the route for the given subnet through gwIP, if one exists +func LinkRouteGetByDstAndGw(link netlink.Link, gwIP net.IP, subnet *net.IPNet) (*netlink.Route, error) { route, err := LinkRouteGetFilteredRoute(filterRouteByDstAndGw(link, subnet, gwIP)) - return route != nil, err + return route, err } // LinkNeighDel deletes an ip binding for a given link diff --git a/go-controller/pkg/util/net_linux_unit_test.go b/go-controller/pkg/util/net_linux_unit_test.go index d6ca4764c8..b7593ad5c3 100644 --- a/go-controller/pkg/util/net_linux_unit_test.go +++ b/go-controller/pkg/util/net_linux_unit_test.go @@ -314,6 +314,129 @@ func TestLinkAddrExist(t *testing.T) { } } +func TestSyncAddresses(t *testing.T) { + mockNetLinkOps := new(mocks.NetLinkOps) + mockLink := new(netlink_mocks.Link) + // below is defined in net_linux.go + netLinkOps = mockNetLinkOps + existingIPNet := netlink.Addr{ + IPNet: ovntest.MustParseIPNet("192.168.1.15/24"), + } + undesiredExistingIPNet := netlink.Addr{ + IPNet: ovntest.MustParseIPNet("123.123.123.15/24"), + } + undesiredExistingIPNet2 := netlink.Addr{ + IPNet: ovntest.MustParseIPNet("123.123.124.15/24"), + } + linkLocalAddr := netlink.Addr{ + IPNet: ovntest.MustParseIPNet("fe80::210:5aff:feaa:20a2/64"), + } + + tests := []struct { + desc string + inputLink netlink.Link + inputNewAddrs []*net.IPNet + errExp bool + onRetArgsNetLinkLibOpers []ovntest.TestifyMockHelper + onRetArgsLinkIfaceOpers []ovntest.TestifyMockHelper + }{ + { + desc: "specifying multiple address families fails", + inputLink: mockLink, + inputNewAddrs: []*net.IPNet{ovntest.MustParseIPNet("192.168.1.15/24"), ovntest.MustParseIPNet("6b35:d6d1:5789:1b33:8ad4:866c:78c1:a085/128")}, + errExp: true, + onRetArgsNetLinkLibOpers: []ovntest.TestifyMockHelper{}, + onRetArgsLinkIfaceOpers: []ovntest.TestifyMockHelper{}, + }, + { + desc: "link address list failure causes error", + inputLink: mockLink, + inputNewAddrs: []*net.IPNet{ovntest.MustParseIPNet("192.168.1.15/24")}, + errExp: true, + onRetArgsNetLinkLibOpers: []ovntest.TestifyMockHelper{ + {OnCallMethodName: "AddrList", OnCallMethodArgType: []string{"*mocks.Link", "int"}, RetArgList: []interface{}{nil, fmt.Errorf("mock error")}}, + }, + onRetArgsLinkIfaceOpers: []ovntest.TestifyMockHelper{ + {OnCallMethodName: "Attrs", OnCallMethodArgType: []string{}, RetArgList: []interface{}{&netlink.LinkAttrs{Name: "testIfaceName"}}}, + }, + }, + { + desc: "new non-existent address should be added", + inputLink: mockLink, + inputNewAddrs: []*net.IPNet{ovntest.MustParseIPNet("192.168.1.15/24")}, + errExp: false, + onRetArgsNetLinkLibOpers: []ovntest.TestifyMockHelper{ +
{OnCallMethodName: "AddrList", OnCallMethodArgType: []string{"*mocks.Link", "int"}, RetArgList: []interface{}{[]netlink.Addr{}, nil}}, + {OnCallMethodName: "AddrAdd", OnCallMethodArgType: []string{"*mocks.Link", "*netlink.Addr"}, RetArgList: []interface{}{nil}}, + }, + onRetArgsLinkIfaceOpers: []ovntest.TestifyMockHelper{}, + }, + { + desc: "address that already exists should not be added", + inputLink: mockLink, + inputNewAddrs: []*net.IPNet{ovntest.MustParseIPNet("192.168.1.15/24")}, + errExp: false, + onRetArgsNetLinkLibOpers: []ovntest.TestifyMockHelper{ + {OnCallMethodName: "AddrList", OnCallMethodArgType: []string{"*mocks.Link", "int"}, RetArgList: []interface{}{[]netlink.Addr{existingIPNet}, nil}}, + }, + onRetArgsLinkIfaceOpers: []ovntest.TestifyMockHelper{}, + }, + { + desc: "address should be added while undesired address should be removed", + inputLink: mockLink, + inputNewAddrs: []*net.IPNet{ovntest.MustParseIPNet("192.168.1.15/24")}, + errExp: false, + onRetArgsNetLinkLibOpers: []ovntest.TestifyMockHelper{ + {OnCallMethodName: "AddrList", OnCallMethodArgType: []string{"*mocks.Link", "int"}, RetArgList: []interface{}{[]netlink.Addr{undesiredExistingIPNet}, nil}}, + {OnCallMethodName: "AddrDel", OnCallMethodArgType: []string{"*mocks.Link", "*netlink.Addr"}, RetArgList: []interface{}{nil}}, + {OnCallMethodName: "AddrAdd", OnCallMethodArgType: []string{"*mocks.Link", "*netlink.Addr"}, RetArgList: []interface{}{nil}}, + }, + onRetArgsLinkIfaceOpers: []ovntest.TestifyMockHelper{}, + }, + { + desc: "multiple addresses should be added while multiple undesired addresses should be removed", + inputLink: mockLink, + inputNewAddrs: []*net.IPNet{ovntest.MustParseIPNet("192.168.1.15/24"), ovntest.MustParseIPNet("192.168.1.16/24")}, + errExp: false, + onRetArgsNetLinkLibOpers: []ovntest.TestifyMockHelper{ + {OnCallMethodName: "AddrList", OnCallMethodArgType: []string{"*mocks.Link", "int"}, RetArgList: []interface{}{[]netlink.Addr{undesiredExistingIPNet, undesiredExistingIPNet2}, nil}}, + {OnCallMethodName: "AddrDel", OnCallMethodArgType: []string{"*mocks.Link", "*netlink.Addr"}, RetArgList: []interface{}{nil}}, + {OnCallMethodName: "AddrDel", OnCallMethodArgType: []string{"*mocks.Link", "*netlink.Addr"}, RetArgList: []interface{}{nil}}, + {OnCallMethodName: "AddrAdd", OnCallMethodArgType: []string{"*mocks.Link", "*netlink.Addr"}, RetArgList: []interface{}{nil}}, + {OnCallMethodName: "AddrAdd", OnCallMethodArgType: []string{"*mocks.Link", "*netlink.Addr"}, RetArgList: []interface{}{nil}}, + }, + onRetArgsLinkIfaceOpers: []ovntest.TestifyMockHelper{}, + }, + { + desc: "IPv6 LLA addresses should not be touched", + inputLink: mockLink, + inputNewAddrs: []*net.IPNet{ovntest.MustParseIPNet("6b35:d6d1:5789:1b33:8ad4:866c:78c1:a085/128")}, + errExp: false, + onRetArgsNetLinkLibOpers: []ovntest.TestifyMockHelper{ + {OnCallMethodName: "AddrList", OnCallMethodArgType: []string{"*mocks.Link", "int"}, RetArgList: []interface{}{[]netlink.Addr{linkLocalAddr}, nil}}, + {OnCallMethodName: "AddrAdd", OnCallMethodArgType: []string{"*mocks.Link", "*netlink.Addr"}, RetArgList: []interface{}{nil}}, + }, + onRetArgsLinkIfaceOpers: []ovntest.TestifyMockHelper{}, + }, + } + for i, tc := range tests { + t.Run(fmt.Sprintf("%d:%s", i, tc.desc), func(t *testing.T) { + + ovntest.ProcessMockFnList(&mockNetLinkOps.Mock, tc.onRetArgsNetLinkLibOpers) + ovntest.ProcessMockFnList(&mockLink.Mock, tc.onRetArgsLinkIfaceOpers) + err := SyncAddresses(tc.inputLink, tc.inputNewAddrs) + t.Log(err) + if tc.errExp { + assert.Error(t, 
err) + } else { + assert.Nil(t, err) + } + mockNetLinkOps.AssertExpectations(t) + mockLink.AssertExpectations(t) + }) + } +} + func TestLinkAddrAdd(t *testing.T) { mockNetLinkOps := new(mocks.NetLinkOps) mockLink := new(netlink_mocks.Link) @@ -674,15 +797,15 @@ func TestLinkRouteExists(t *testing.T) { ovntest.ProcessMockFnList(&mockNetLinkOps.Mock, tc.onRetArgsNetLinkLibOpers) ovntest.ProcessMockFnList(&mockLink.Mock, tc.onRetArgsLinkIfaceOpers) - flag, err := LinkRouteExists(tc.inputLink, tc.inputGwIP, tc.inputSubnet) - t.Log(flag, err) + route, err := LinkRouteGetByDstAndGw(tc.inputLink, tc.inputGwIP, tc.inputSubnet) + t.Log(route, err) if tc.errExp { assert.Error(t, err) } else { assert.Nil(t, err) } if tc.outBoolFlag { - assert.True(t, flag) + assert.True(t, route != nil) } mockNetLinkOps.AssertExpectations(t) mockLink.AssertExpectations(t) diff --git a/go-controller/pkg/util/node_annotations.go b/go-controller/pkg/util/node_annotations.go index 145f01b9e0..d2581f9977 100644 --- a/go-controller/pkg/util/node_annotations.go +++ b/go-controller/pkg/util/node_annotations.go @@ -104,6 +104,9 @@ const ( // standard linux interfaces and not interfaces of type OVS. OVNNodeSecondaryHostEgressIPs = "k8s.ovn.org/secondary-host-egress-ips" + // OVNNodeBridgeEgressIPs contains the EIP addresses that are assigned to the default external bridge, a linux interface of type OVS. + OVNNodeBridgeEgressIPs = "k8s.ovn.org/bridge-egress-ips" + // egressIPConfigAnnotationKey is used to indicate the cloud subnet and // capacity for each node. It is set by // openshift/cloud-network-config-controller @@ -143,8 +146,20 @@ const ( // default network and other layer3 secondary networks by cluster manager. ovnNetworkIDs = "k8s.ovn.org/network-ids" - // invalidNetworkID signifies its an invalid network id - InvalidNetworkID = -1 + // ovnUDNLayer2NodeGRLRPTunnelIDs is the constant string representing the tunnel id allocated for the + // UDN L2 network for this node's GR LRP by cluster manager. This is used to create the remote tunnel + // ports for each node. + // "k8s.ovn.org/udn-layer2-node-gateway-router-lrp-tunnel-ids": "{ + // "l2-network-a":"5", + // "l2-network-b":"10"} + // }", + ovnUDNLayer2NodeGRLRPTunnelIDs = "k8s.ovn.org/udn-layer2-node-gateway-router-lrp-tunnel-ids" + + // InvalidID signifies an invalid network ID or tunnel ID + InvalidID = -1 + + // NoID signifies an empty tunnel ID (it is reserved as unusable when the allocator is created) + NoID = 0 ) type L3GatewayConfig struct { @@ -528,6 +543,34 @@ func ParseNodeManagementPortMACAddresses(node *kapi.Node, netName string) (net.H return net.ParseMAC(macAddress) } +// ParseUDNLayer2NodeGRLRPTunnelIDs parses the 'ovnUDNLayer2NodeGRLRPTunnelIDs' annotation +// for the specified network in 'netName' and returns the tunnelID. +func ParseUDNLayer2NodeGRLRPTunnelIDs(node *kapi.Node, netName string) (int, error) { + tunnelIDsMap, err := parseNetworkMapAnnotation(node.Annotations, ovnUDNLayer2NodeGRLRPTunnelIDs) + if err != nil { + return InvalidID, err + } + + tunnelID, ok := tunnelIDsMap[netName] + if !ok { + return InvalidID, newAnnotationNotSetError("node %q has no %q annotation for network %s", node.Name, ovnUDNLayer2NodeGRLRPTunnelIDs, netName) + } + + return strconv.Atoi(tunnelID) +} + +// UpdateUDNLayer2NodeGRLRPTunnelIDs updates the ovnUDNLayer2NodeGRLRPTunnelIDs annotation for the network name 'netName' with the tunnel id 'tunnelID'. +// If 'tunnelID' is the invalid tunnel ID (-1), the network is deleted from the tunnel IDs annotation.
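
Reviewer aside, not part of the patch: a round-trip sketch of the two tunnel-ID helpers; the values mirror the annotation example in the constant's comment above.

```go
package util

import (
	kapi "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func ExampleUDNLayer2TunnelIDAnnotation() {
	node := &kapi.Node{ObjectMeta: metav1.ObjectMeta{
		Annotations: map[string]string{
			"k8s.ovn.org/udn-layer2-node-gateway-router-lrp-tunnel-ids": `{"l2-network-a":"5"}`,
		},
	}}
	id, _ := ParseUDNLayer2NodeGRLRPTunnelIDs(node, "l2-network-a") // id == 5
	_, err := ParseUDNLayer2NodeGRLRPTunnelIDs(node, "l2-network-b")
	_ = IsAnnotationNotSetError(err) // true: network missing from the map

	// Record an ID for a second network; passing InvalidID instead would
	// remove the network's entry (and the annotation once the map is empty).
	annos, _ := UpdateUDNLayer2NodeGRLRPTunnelIDs(node.Annotations, "l2-network-b", 10)
	_, _ = id, annos
}
```
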
+func UpdateUDNLayer2NodeGRLRPTunnelIDs(annotations map[string]string, netName string, tunnelID int) (map[string]string, error) { + if annotations == nil { + annotations = map[string]string{} + } + if err := updateNetworkAnnotation(annotations, netName, tunnelID, ovnUDNLayer2NodeGRLRPTunnelIDs); err != nil { + return nil, err + } + return annotations, nil +} + type primaryIfAddrAnnotation struct { IPv4 string `json:"ipv4,omitempty"` IPv6 string `json:"ipv6,omitempty"` @@ -725,7 +768,7 @@ type ParsedNodeEgressIPConfiguration struct { Capacity Capacity } -func getNodeIfAddrAnnotation(node *kapi.Node) (*primaryIfAddrAnnotation, error) { +func GetNodeIfAddrAnnotation(node *kapi.Node) (*primaryIfAddrAnnotation, error) { nodeIfAddrAnnotation, ok := node.Annotations[OvnNodeIfAddr] if !ok { return nil, newAnnotationNotSetError("%s annotation not found for node %q", OvnNodeIfAddr, node.Name) @@ -742,7 +785,7 @@ func getNodeIfAddrAnnotation(node *kapi.Node) (*primaryIfAddrAnnotation, error) // ParseNodePrimaryIfAddr returns the IPv4 / IPv6 values for the node's primary network interface func ParseNodePrimaryIfAddr(node *kapi.Node) (*ParsedNodeEgressIPConfiguration, error) { - nodeIfAddr, err := getNodeIfAddrAnnotation(node) + nodeIfAddr, err := GetNodeIfAddrAnnotation(node) if err != nil { return nil, err } @@ -830,7 +873,7 @@ func ParseNodeGatewayRouterLRPAddrs(node *kapi.Node) ([]*net.IPNet, error) { return parsePrimaryIfAddrAnnotation(node, ovnNodeGRLRPAddr) } -func parseNodeGatewayRouterJoinNetwork(node *kapi.Node, netName string) (primaryIfAddrAnnotation, error) { +func ParseNodeGatewayRouterJoinNetwork(node *kapi.Node, netName string) (primaryIfAddrAnnotation, error) { var val primaryIfAddrAnnotation joinSubnetMap, err := parseJoinSubnetAnnotation(node.Annotations, OVNNodeGRLRPAddrs) if err != nil { @@ -839,7 +882,7 @@ func parseNodeGatewayRouterJoinNetwork(node *kapi.Node, netName string) (primary } val, ok := joinSubnetMap[netName] if !ok { - return val, fmt.Errorf("unable to fetch annotation value on node %s for network %s", + return val, newAnnotationNotSetError("unable to fetch annotation value on node %s for network %s", node.Name, netName) } return val, nil @@ -848,7 +891,7 @@ func parseNodeGatewayRouterJoinNetwork(node *kapi.Node, netName string) (primary // ParseNodeGatewayRouterJoinIPv4 returns the IPv4 address for the node's gateway router port // stored in the 'OVNNodeGRLRPAddrs' annotation func ParseNodeGatewayRouterJoinIPv4(node *kapi.Node, netName string) (net.IP, error) { - primaryIfAddr, err := parseNodeGatewayRouterJoinNetwork(node, netName) + primaryIfAddr, err := ParseNodeGatewayRouterJoinNetwork(node, netName) if err != nil { return nil, err } @@ -867,7 +910,7 @@ func ParseNodeGatewayRouterJoinIPv4(node *kapi.Node, netName string) (net.IP, er // ParseNodeGatewayRouterJoinAddrs returns the IPv4 and/or IPv6 addresses for the node's gateway router port // stored in the 'OVNNodeGRLRPAddrs' annotation func ParseNodeGatewayRouterJoinAddrs(node *kapi.Node, netName string) ([]*net.IPNet, error) { - primaryIfAddr, err := parseNodeGatewayRouterJoinNetwork(node, netName) + primaryIfAddr, err := ParseNodeGatewayRouterJoinNetwork(node, netName) if err != nil { return nil, err } @@ -932,7 +975,7 @@ func ParseCloudEgressIPConfig(node *kapi.Node) (*ParsedNodeEgressIPConfiguration // ParsedNodeEgressIPConfiguration.V[4|6].IP is used to verify if an egress IP matches node IP to disable its creation // use node IP instead of the value assigned from cloud egress CIDR config - nodeIfAddr, err 
:= getNodeIfAddrAnnotation(node) + nodeIfAddr, err := GetNodeIfAddrAnnotation(node) if err != nil { return nil, err } @@ -1010,6 +1053,38 @@ func ParseNodeHostCIDRs(node *kapi.Node) (sets.Set[string], error) { return sets.New(cfg...), nil } +// ParseNodeHostIPDropNetMask returns the parsed host IP addresses found on a node's host CIDR annotation. Removes the mask. +func ParseNodeHostIPDropNetMask(node *kapi.Node) (sets.Set[string], error) { + nodeIfAddrAnnotation, ok := node.Annotations[OvnNodeIfAddr] + if !ok { + return nil, newAnnotationNotSetError("%s annotation not found for node %q", OvnNodeIfAddr, node.Name) + } + nodeIfAddr := &primaryIfAddrAnnotation{} + if err := json.Unmarshal([]byte(nodeIfAddrAnnotation), nodeIfAddr); err != nil { + return nil, fmt.Errorf("failed to unmarshal annotation: %s for node %q, err: %v", OvnNodeIfAddr, node.Name, err) + } + + var cfg []string + if nodeIfAddr.IPv4 != "" { + cfg = append(cfg, nodeIfAddr.IPv4) + } + if nodeIfAddr.IPv6 != "" { + cfg = append(cfg, nodeIfAddr.IPv6) + } + if len(cfg) == 0 { + return nil, fmt.Errorf("node: %q does not have any IP information set", node.Name) + } + + for i, cidr := range cfg { + ip, _, err := net.ParseCIDR(cidr) + if err != nil || ip == nil { + return nil, fmt.Errorf("failed to parse node host cidr: %v", err) + } + cfg[i] = ip.String() + } + return sets.New(cfg...), nil +} + // ParseNodeHostCIDRsDropNetMask returns the parsed host IP addresses found on a node's host CIDR annotation. Removes the mask. func ParseNodeHostCIDRsDropNetMask(node *kapi.Node) (sets.Set[string], error) { addrAnnotation, ok := node.Annotations[OVNNodeHostCIDRs] @@ -1040,7 +1115,7 @@ func GetNodeHostAddrs(node *kapi.Node) ([]string, error) { if err != nil && !IsAnnotationNotSetError(err) { return nil, fmt.Errorf("failed to get node host CIDRs for %s: %s", node.Name, err.Error()) } - return hostAddresses.UnsortedList(), nil + return sets.List(hostAddresses), nil } func ParseNodeHostCIDRsExcludeOVNNetworks(node *kapi.Node) ([]string, error) { @@ -1048,7 +1123,7 @@ func ParseNodeHostCIDRsExcludeOVNNetworks(node *kapi.Node) ([]string, error) { if err != nil { return nil, err } - ovnNetworks, err := getNodeIfAddrAnnotation(node) + ovnNetworks, err := GetNodeIfAddrAnnotation(node) if err != nil { return nil, err } @@ -1096,6 +1171,27 @@ func ParseNodeSecondaryHostEgressIPsAnnotation(node *kapi.Node) (sets.Set[string return sets.New(cfg...), nil } +// IsNodeBridgeEgressIPsAnnotationSet returns true if an annotation that tracks assignment of egress IPs to external bridge (breth0) +// is set +func IsNodeBridgeEgressIPsAnnotationSet(node *kapi.Node) bool { + _, ok := node.Annotations[OVNNodeBridgeEgressIPs] + return ok +} + +// ParseNodeBridgeEgressIPsAnnotation returns egress IPs assigned to the external bridge (breth0) +func ParseNodeBridgeEgressIPsAnnotation(node *kapi.Node) ([]string, error) { + addrAnnotation, ok := node.Annotations[OVNNodeBridgeEgressIPs] + if !ok { + return nil, newAnnotationNotSetError("%s annotation not found for node %q", OVNNodeBridgeEgressIPs, node.Name) + } + + var cfg []string + if err := json.Unmarshal([]byte(addrAnnotation), &cfg); err != nil { + return nil, fmt.Errorf("failed to unmarshal %s annotation %s for node %q: %v", OVNNodeBridgeEgressIPs, addrAnnotation, node.Name, err) + } + return cfg, nil +} + // IsSecondaryHostNetworkContainingIP attempts to find a secondary host network that will host the argument IP. 
If no network is + found, false is returned func IsSecondaryHostNetworkContainingIP(node *v1.Node, ip net.IP) (bool, error) { @@ -1263,21 +1359,21 @@ func parseNetworkMapAnnotation(nodeAnnotations map[string]string, annotationName return nil, newAnnotationNotSetError("could not find %q annotation", annotationName) } - networkIdsStrMap := map[string]string{} - networkIds := make(map[string]string) - if err := json.Unmarshal([]byte(annotation), &networkIds); err != nil { + idsStrMap := map[string]string{} + ids := make(map[string]string) + if err := json.Unmarshal([]byte(annotation), &ids); err != nil { return nil, fmt.Errorf("could not parse %q annotation %q : %v", annotationName, annotation, err) } - for netName, v := range networkIds { - networkIdsStrMap[netName] = v + for netName, v := range ids { + idsStrMap[netName] = v } - if len(networkIdsStrMap) == 0 { + if len(idsStrMap) == 0 { return nil, fmt.Errorf("unexpected empty %s annotation", annotationName) } - return networkIdsStrMap, nil + return idsStrMap, nil } // ParseNetworkIDAnnotation parses the 'ovnNetworkIDs' annotation for the specified @@ -1285,57 +1381,58 @@ func parseNetworkMapAnnotation(nodeAnnotations map[string]string, annotationName func ParseNetworkIDAnnotation(node *kapi.Node, netName string) (int, error) { networkIDsMap, err := parseNetworkMapAnnotation(node.Annotations, ovnNetworkIDs) if err != nil { - return InvalidNetworkID, err + return InvalidID, err } networkID, ok := networkIDsMap[netName] if !ok { - return InvalidNetworkID, newAnnotationNotSetError("node %q has no %q annotation for network %s", node.Name, ovnNetworkIDs, netName) + return InvalidID, newAnnotationNotSetError("node %q has no %q annotation for network %s", node.Name, ovnNetworkIDs, netName) } return strconv.Atoi(networkID) } -// updateNetworkIDsAnnotation updates the ovnNetworkIDs annotation in the 'annotations' map -// with the provided network id in 'networkID'. If 'networkID' is InvalidNetworkID (-1) -// it deletes the ovnNetworkIDs annotation from the map. -func updateNetworkIDsAnnotation(annotations map[string]string, netName string, networkID int) error { +// updateNetworkAnnotation updates the value of the provided annotationName in the 'annotations' map +// with the provided 'id' for network 'netName'. If 'id' is InvalidID (-1), +// the network's entry is deleted from the annotation's value. +// It is currently used for the ovnNetworkIDs and tunnel-ID annotation updates. +func updateNetworkAnnotation(annotations map[string]string, netName string, id int, annotationName string) error { var bytes []byte - // First get the all network ids for all existing networks - networkIDsMap, err := parseNetworkMapAnnotation(annotations, ovnNetworkIDs) + // First get all the ids for all existing networks + idsMap, err := parseNetworkMapAnnotation(annotations, annotationName) if err != nil { if !IsAnnotationNotSetError(err) { return fmt.Errorf("failed to parse node network id annotation %q: %v", annotations, err) } // in the case that the annotation does not exist - networkIDsMap = map[string]string{} + idsMap = map[string]string{} } // add or delete network id of the specified network - if networkID == InvalidNetworkID { - delete(networkIDsMap, netName) + if id == InvalidID { + delete(idsMap, netName) } else { - networkIDsMap[netName] = strconv.Itoa(networkID) + idsMap[netName] = strconv.Itoa(id) } - // if no networks left, just delete the network ids annotation from node annotations.
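
Reviewer aside, not part of the patch: the generalized helper keeps the ovnNetworkIDs behavior intact; a sketch of the public wrapper's semantics (the wrapper appears just below in this hunk).

```go
package util

func ExampleUpdateNetworkIDAnnotation() {
	annos, _ := UpdateNetworkIDAnnotation(nil, "bluenet", 3)
	// annos["k8s.ovn.org/network-ids"] == `{"bluenet":"3"}`
	annos, _ = UpdateNetworkIDAnnotation(annos, "rednet", 7)
	// InvalidID deletes a network's entry; deleting the last entry drops
	// the annotation key from the map entirely.
	annos, _ = UpdateNetworkIDAnnotation(annos, "bluenet", InvalidID)
	annos, _ = UpdateNetworkIDAnnotation(annos, "rednet", InvalidID)
	_ = annos // no "k8s.ovn.org/network-ids" key left
}
```
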
- if len(networkIDsMap) == 0 { - delete(annotations, ovnNetworkIDs) + // if no networks left, just delete the annotation from node annotations. + if len(idsMap) == 0 { + delete(annotations, annotationName) return nil } // Marshal all network ids back to annotations. - networkIdsStrMap := make(map[string]string) - for n, id := range networkIDsMap { - networkIdsStrMap[n] = id + idsStrMap := make(map[string]string) + for n, id := range idsMap { + idsStrMap[n] = id } - bytes, err = json.Marshal(networkIdsStrMap) + bytes, err = json.Marshal(idsStrMap) if err != nil { return err } - annotations[ovnNetworkIDs] = string(bytes) + annotations[annotationName] = string(bytes) return nil } @@ -1345,7 +1442,7 @@ func UpdateNetworkIDAnnotation(annotations map[string]string, netName string, ne if annotations == nil { annotations = map[string]string{} } - err := updateNetworkIDsAnnotation(annotations, netName, networkID) + err := updateNetworkAnnotation(annotations, netName, networkID, ovnNetworkIDs) if err != nil { return nil, err } @@ -1414,11 +1511,11 @@ func GetNetworkID(nodes []*corev1.Node, nInfo BasicNetInfo) (int, error) { if IsAnnotationNotSetError(err) { continue } - return InvalidNetworkID, err + return InvalidID, err } - if networkID != InvalidNetworkID { + if networkID != InvalidID { return networkID, nil } } - return InvalidNetworkID, fmt.Errorf("missing network id for network '%s'", nInfo.GetNetworkName()) + return InvalidID, fmt.Errorf("missing network id for network '%s'", nInfo.GetNetworkName()) } diff --git a/go-controller/pkg/util/node_annotations_unit_test.go b/go-controller/pkg/util/node_annotations_unit_test.go index 611fb8445f..3acb019e35 100644 --- a/go-controller/pkg/util/node_annotations_unit_test.go +++ b/go-controller/pkg/util/node_annotations_unit_test.go @@ -775,7 +775,7 @@ func TestGetNetworkID(t *testing.T) { desc: "with no nodes should return and error and invalid network ID", netInfo: newDummyNetInfo("rednamespace", "bluenet"), expectedError: fmt.Errorf("missing network id for network 'bluenet'"), - expectedNetworkID: InvalidNetworkID, + expectedNetworkID: InvalidID, }, { desc: "with bad network ID annotations should return and error and invalid network ID", @@ -790,7 +790,7 @@ func TestGetNetworkID(t *testing.T) { }, netInfo: newDummyNetInfo("rednamespace", "bluenet"), expectedError: fmt.Errorf("could not parse"), - expectedNetworkID: InvalidNetworkID, + expectedNetworkID: InvalidID, }, { desc: "with multiple networks annotation should return expected network ID and no error", @@ -826,3 +826,69 @@ func TestGetNetworkID(t *testing.T) { }) } } + +func TestParseUDNLayer2NodeGRLRPTunnelIDs(t *testing.T) { + tests := []struct { + desc string + inpNode *v1.Node + inpNetName string + res int + errExpected bool + }{ + { + desc: "annotation not found for node and invalidID", + inpNode: &v1.Node{}, + inpNetName: "rednet", + res: -1, + }, + { + desc: "parse completed and validID", + inpNode: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "k8s.ovn.org/udn-layer2-node-gateway-router-lrp-tunnel-ids": `{"rednet":"5"}`, + }, + }, + }, + inpNetName: "rednet", + errExpected: false, + res: 5, + }, + { + desc: "parse completed and invalid value", + inpNode: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "k8s.ovn.org/udn-layer2-node-gateway-router-lrp-tunnel-ids": `blah`, + }, + }, + }, + errExpected: true, + inpNetName: "rednet", + res: -1, + }, + { + desc: "multiple networks; parse completed and validID", + inpNode: &v1.Node{ 
+ ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "k8s.ovn.org/udn-layer2-node-gateway-router-lrp-tunnel-ids": `{"rednet":"5", "bluenet":"8"}`, + }, + }, + }, + inpNetName: "bluenet", + errExpected: false, + res: 8, + }, + } + for i, tc := range tests { + t.Run(fmt.Sprintf("%d:%s", i, tc.desc), func(t *testing.T) { + res, err := ParseUDNLayer2NodeGRLRPTunnelIDs(tc.inpNode, tc.inpNetName) + if tc.errExpected { + t.Log(err) + assert.Error(t, err) + } + assert.Equal(t, tc.res, res) + }) + } +} diff --git a/go-controller/pkg/util/pod_annotation.go b/go-controller/pkg/util/pod_annotation.go index 12583871b9..3f18b6c81d 100644 --- a/go-controller/pkg/util/pod_annotation.go +++ b/go-controller/pkg/util/pod_annotation.go @@ -11,10 +11,13 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "k8s.io/client-go/tools/cache" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" listers "k8s.io/client-go/listers/core/v1" utilnet "k8s.io/utils/net" + "sigs.k8s.io/yaml" ) // This handles the "k8s.ovn.org/pod-networks" annotation on Pods, used to pass @@ -51,6 +54,11 @@ const ( OvnPodAnnotationName = "k8s.ovn.org/pod-networks" // DefNetworkAnnotation is the pod annotation for the cluster-wide default network DefNetworkAnnotation = "v1.multus-cni.io/default-network" + // OvnUDNIPAMClaimName is used by workload owners to instruct OVN-K which + // IPAMClaim will hold the allocation for the workload + OvnUDNIPAMClaimName = "k8s.ovn.org/primary-udn-ipamclaim" + // UDNOpenPortsAnnotationName is the pod annotation to open default network ports on UDN pods. + UDNOpenPortsAnnotationName = "k8s.ovn.org/open-default-ports" ) var ErrNoPodIPFound = errors.New("no pod IPs found") @@ -121,6 +129,12 @@ type podRoute struct { NextHop string `json:"nextHop"` } +type OpenPort struct { + // valid values are tcp, udp, sctp, icmp + Protocol string `json:"protocol"` + Port *int `json:"port,omitempty"` +} + // MarshalPodAnnotation adds the pod's network details of the specified network to the corresponding pod annotation. func MarshalPodAnnotation(annotations map[string]string, podInfo *PodAnnotation, nadName string) (map[string]string, error) { if annotations == nil { @@ -310,6 +324,21 @@ func GetPodIPsOfNetwork(pod *v1.Pod, nInfo NetInfo) ([]net.IP, error) { return DefaultNetworkPodIPs(pod) } +// GetPodCIDRsWithFullMaskOfNetwork returns the pod's IP addresses as CIDRs with a full netmask, +// read from the pod network annotation 'k8s.ovn.org/pod-networks' using key nadName. +func GetPodCIDRsWithFullMaskOfNetwork(pod *v1.Pod, nadName string) []*net.IPNet { + ips := getAnnotatedPodIPs(pod, nadName) + ipNets := make([]*net.IPNet, 0, len(ips)) + for _, ip := range ips { + ipNet := net.IPNet{ + IP: ip, + Mask: GetIPFullMask(ip), + } + ipNets = append(ipNets, &ipNet) + } + return ipNets +} + func DefaultNetworkPodIPs(pod *v1.Pod) ([]net.IP, error) { // Try to use Kube API pod IPs for default network first // This is much faster than trying to unmarshal annotations @@ -353,7 +382,14 @@ func SecondaryNetworkPodIPs(pod *v1.Pod, networkInfo NetInfo) ([]net.IP, error) return ips, nil } +// PodNadNames returns the pod's NAD names associated with the network specified by netinfo. +// If netinfo belongs to a user-defined primary network, the NAD names are retrieved from +// netinfo.GetNADs(), selecting the NAD that serves the pod's namespace.
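
Reviewer aside, not part of the patch: what the new open-ports annotation looks like on a pod; UnmarshalUDNOpenPortsAnnotation, defined further down in this hunk, parses and validates it.

```go
package util

import "fmt"

func ExampleUnmarshalUDNOpenPortsAnnotation() {
	annotations := map[string]string{
		UDNOpenPortsAnnotationName: `
- protocol: tcp
  port: 80
- protocol: icmp
`,
	}
	// icmp entries must omit the port; tcp/udp/sctp entries must set one.
	ports, err := UnmarshalUDNOpenPortsAnnotation(annotations)
	fmt.Println(err, len(ports)) // prints: <nil> 2
}
```
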
+// For all other cases, retrieve NAD names for the pod based on NetworkSelectionElement. func PodNadNames(pod *v1.Pod, netinfo NetInfo) ([]string, error) { + if netinfo.IsPrimaryNetwork() { + return GetPrimaryNetworkNADNamesForNamespaceFromNetInfo(pod.Namespace, netinfo) + } on, networkMap, err := GetPodNADToNetworkMapping(pod, netinfo) // skip pods that are not on this network if err != nil { @@ -368,6 +404,20 @@ func PodNadNames(pod *v1.Pod, netinfo NetInfo) ([]string, error) { return nadNames, nil } +func GetPrimaryNetworkNADNamesForNamespaceFromNetInfo(namespace string, netinfo NetInfo) ([]string, error) { + for _, nadName := range netinfo.GetNADs() { + ns, _, err := cache.SplitMetaNamespaceKey(nadName) + if err != nil { + return nil, fmt.Errorf("error parsing nad name %s from network %s: %v", nadName, netinfo.GetNetworkName(), err) + } + if ns != namespace { + continue + } + return []string{nadName}, nil + } + return []string{}, nil +} + func getAnnotatedPodIPs(pod *v1.Pod, nadName string) []net.IP { var ips []net.IP annotation, _ := UnmarshalPodAnnotation(pod.Annotations, nadName) @@ -612,3 +662,34 @@ func AddRoutesGatewayIP( return nil } + +// UnmarshalUDNOpenPortsAnnotation returns the OpenPorts from the pod annotation. If annotation is not present, +// empty list with no error is returned. +func UnmarshalUDNOpenPortsAnnotation(annotations map[string]string) ([]*OpenPort, error) { + result := []*OpenPort{} + ports, ok := annotations[UDNOpenPortsAnnotationName] + if !ok { + return result, nil + } + if err := yaml.Unmarshal([]byte(ports), &result); err != nil { + return nil, fmt.Errorf("failed to unmarshal UDN open ports annotation %s: %v", ports, err) + } + allowedProtocols := sets.New("tcp", "udp", "sctp", "icmp") + + for _, portDef := range result { + if !allowedProtocols.Has(portDef.Protocol) { + return nil, fmt.Errorf("invalid protocol %s", portDef.Protocol) + } + if portDef.Protocol == "icmp" { + if portDef.Port != nil { + return nil, fmt.Errorf("invalid port %v for icmp protocol, should be empty", *portDef.Port) + } + } else if portDef.Port == nil { + return nil, fmt.Errorf("port is required for %s protocol", portDef.Protocol) + } + if portDef.Port != nil && (*portDef.Port > 65535 || *portDef.Port < 0) { + return nil, fmt.Errorf("invalid port %v", *portDef.Port) + } + } + return result, nil +} diff --git a/go-controller/pkg/util/pod_annotation_unit_test.go b/go-controller/pkg/util/pod_annotation_unit_test.go index c3d5db1896..c2ab82a194 100644 --- a/go-controller/pkg/util/pod_annotation_unit_test.go +++ b/go-controller/pkg/util/pod_annotation_unit_test.go @@ -371,3 +371,105 @@ func newDummyNetInfo(namespace, networkName string) NetInfo { netInfo.AddNADs(GetNADName(namespace, networkName)) return netInfo } + +func TestUnmarshalUDNOpenPortsAnnotation(t *testing.T) { + intRef := func(i int) *int { + return &i + } + + tests := []struct { + desc string + input string + errSubstr string + result []*OpenPort + }{ + { + desc: "protocol without port", + input: `- protocol: tcp`, + errSubstr: "port is required", + }, + { + desc: "port without protocol", + input: `- port: 80`, + errSubstr: "invalid protocol", + }, + { + desc: "invalid protocol", + input: `- protocol: foo`, + errSubstr: "invalid protocol", + }, + { + desc: "icmp with port", + input: `- protocol: icmp + port: 80`, + errSubstr: "invalid port 80 for icmp protocol, should be empty", + }, + { + desc: "valid icmp", + input: `- protocol: icmp`, + result: []*OpenPort{ + { + Protocol: "icmp", + }, + }, + }, + { + desc: "invalid 
port", + input: `- protocol: tcp + port: 100000`, + errSubstr: "invalid port", + }, + { + desc: "valid tcp", + input: `- protocol: tcp + port: 80`, + result: []*OpenPort{ + { + Protocol: "tcp", + Port: intRef(80), + }, + }, + }, + { + desc: "valid multiple protocols", + input: `- protocol: tcp + port: 1 +- protocol: udp + port: 2 +- protocol: sctp + port: 3 +- protocol: icmp`, + result: []*OpenPort{ + { + Protocol: "tcp", + Port: intRef(1), + }, + { + Protocol: "udp", + Port: intRef(2), + }, + { + Protocol: "sctp", + Port: intRef(3), + }, + { + Protocol: "icmp", + }, + }, + }, + } + for _, tc := range tests { + t.Run(tc.desc, func(t *testing.T) { + res, err := UnmarshalUDNOpenPortsAnnotation(map[string]string{ + UDNOpenPortsAnnotationName: tc.input, + }) + if tc.errSubstr != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), tc.errSubstr) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.result, res) + } + }) + } +} diff --git a/go-controller/pkg/util/slice.go b/go-controller/pkg/util/slice.go index 17f9d8655c..784283c8fe 100644 --- a/go-controller/pkg/util/slice.go +++ b/go-controller/pkg/util/slice.go @@ -20,3 +20,15 @@ func RemoveItemFromSliceUnstable[T comparable](slice []T, candidate T) []T { } return slice } + +// IsItemInSlice checks if candidate is equal to at least one entry in slice +func IsItemInSlice[T comparable](slice []T, candidate T) bool { + var found bool + for _, sliceEntry := range slice { + if sliceEntry == candidate { + found = true + break + } + } + return found +} diff --git a/go-controller/pkg/util/status.go b/go-controller/pkg/util/status.go new file mode 100644 index 0000000000..535b1e4fef --- /dev/null +++ b/go-controller/pkg/util/status.go @@ -0,0 +1,19 @@ +package util + +import corev1 "k8s.io/api/core/v1" + +type EventType = string + +// There are only 2 allowed event types for now: Normal and Warning +const ( + EventTypeNormal EventType = corev1.EventTypeNormal + EventTypeWarning EventType = corev1.EventTypeWarning +) + +// EventDetails may be used to pass event details to the event recorder, that is not used directly. +// It based on the EventRecorder interface for core.Events. It doesn't have related objects, +// as they are not used in the current implementation. +type EventDetails struct { + EventType EventType + Reason, Note string +} diff --git a/go-controller/pkg/util/udn/udn.go b/go-controller/pkg/util/udn/udn.go new file mode 100644 index 0000000000..3c3ec66188 --- /dev/null +++ b/go-controller/pkg/util/udn/udn.go @@ -0,0 +1,21 @@ +package udn + +import userdefinednetworkv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" + +type specGetter interface { + GetTopology() userdefinednetworkv1.NetworkTopology + GetLayer3() *userdefinednetworkv1.Layer3Config + GetLayer2() *userdefinednetworkv1.Layer2Config +} + +func IsPrimaryNetwork(spec specGetter) bool { + var role userdefinednetworkv1.NetworkRole + switch spec.GetTopology() { + case userdefinednetworkv1.NetworkTopologyLayer3: + role = spec.GetLayer3().Role + case userdefinednetworkv1.NetworkTopologyLayer2: + role = spec.GetLayer2().Role + } + + return role == userdefinednetworkv1.NetworkRolePrimary +} diff --git a/go-controller/pkg/util/udn/udn_suite_test.go b/go-controller/pkg/util/udn/udn_suite_test.go new file mode 100644 index 0000000000..b8d3e65f24 --- /dev/null +++ b/go-controller/pkg/util/udn/udn_suite_test.go @@ -0,0 +1,13 @@ +package udn + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestUserDefinedNetworkController(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Utils User Defined Network Suite") +} diff --git a/go-controller/pkg/util/udn/udn_test.go b/go-controller/pkg/util/udn/udn_test.go new file mode 100644 index 0000000000..270ab5e0b7 --- /dev/null +++ b/go-controller/pkg/util/udn/udn_test.go @@ -0,0 +1,85 @@ +package udn + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + udnv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" +) + +var _ = Describe("PrimaryNetworkSpec", func() { + DescribeTable("should return true, given", + func(spec specGetter) { + Expect(IsPrimaryNetwork(spec)).To(BeTrue()) + }, + Entry("udn crd spec, l3, primary", + &udnv1.UserDefinedNetworkSpec{ + Topology: udnv1.NetworkTopologyLayer3, + Layer3: &udnv1.Layer3Config{ + Role: udnv1.NetworkRolePrimary, + }, + }, + ), + Entry("udn crd spec, l2, primary", + &udnv1.UserDefinedNetworkSpec{ + Topology: udnv1.NetworkTopologyLayer2, + Layer2: &udnv1.Layer2Config{ + Role: udnv1.NetworkRolePrimary, + }, + }, + ), + Entry("cluster-udn spec, l3, primary", + &udnv1.NetworkSpec{ + Topology: udnv1.NetworkTopologyLayer3, + Layer3: &udnv1.Layer3Config{ + Role: udnv1.NetworkRolePrimary, + }, + }, + ), + Entry("cluster-udn spec, l2, primary", + &udnv1.NetworkSpec{ + Topology: udnv1.NetworkTopologyLayer2, + Layer2: &udnv1.Layer2Config{ + Role: udnv1.NetworkRolePrimary, + }, + }, + ), + ) + DescribeTable("should return false, given", + func(spec specGetter) { + Expect(IsPrimaryNetwork(spec)).To(BeFalse()) + }, + Entry("udn crd spec, l3, secondary", + &udnv1.UserDefinedNetworkSpec{ + Topology: udnv1.NetworkTopologyLayer3, + Layer3: &udnv1.Layer3Config{ + Role: udnv1.NetworkRoleSecondary, + }, + }, + ), + Entry("udn crd spec, l2, secondary", + &udnv1.UserDefinedNetworkSpec{ + Topology: udnv1.NetworkTopologyLayer2, + Layer2: &udnv1.Layer2Config{ + Role: udnv1.NetworkRoleSecondary, + }, + }, + ), + Entry("cluster-udn spec, l3, secondary", + &udnv1.NetworkSpec{ + Topology: udnv1.NetworkTopologyLayer3, + Layer3: &udnv1.Layer3Config{ + Role: udnv1.NetworkRoleSecondary, + }, + }, + ), + Entry("cluster-udn spec, l2, secondary", + &udnv1.NetworkSpec{ + Topology: udnv1.NetworkTopologyLayer2, + Layer2: &udnv1.Layer2Config{ + Role: udnv1.NetworkRoleSecondary, + }, + }, + ), + ) +}) diff --git a/go-controller/pkg/util/util.go b/go-controller/pkg/util/util.go index fe7f3ff7fc..f3f7ab33fb 100644 --- a/go-controller/pkg/util/util.go +++ b/go-controller/pkg/util/util.go @@ -12,19 +12,22 @@ import ( "time" "golang.org/x/exp/constraints" + k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/cache" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" - "k8s.io/apimachinery/pkg/labels" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - nadlister "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "crypto/rand" "github.com/urfave/cli/v2" v1 "k8s.io/api/core/v1" + discovery "k8s.io/api/discovery/v1" discoveryv1 "k8s.io/api/discovery/v1" + discoverylisters "k8s.io/client-go/listers/discovery/v1" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" @@ -368,65 +371,44 @@ func IsClusterIP(svcVIP string) bool { return false } -type UnknownActiveNetworkError struct { +type UnprocessedActiveNetworkError 
struct { namespace string + udnName string } -func (m *UnknownActiveNetworkError) Error() string { - return fmt.Sprintf("unable to determine what is the "+ - "primary role network for namespace '%s'; please remove multiple primary role network"+ - "NADs from it", m.namespace) +func (m *UnprocessedActiveNetworkError) Error() string { + return fmt.Sprintf("primary UDN %q exists in namespace %s, but NAD has not been processed yet", + m.udnName, m.namespace) } -func IsUnknownActiveNetworkError(err error) bool { - var unknownActiveNetworkError *UnknownActiveNetworkError - return errors.As(err, &unknownActiveNetworkError) +func IsUnprocessedActiveNetworkError(err error) bool { + var unprocessedActiveNetworkError *UnprocessedActiveNetworkError + return errors.As(err, &unprocessedActiveNetworkError) } -// GetActiveNetworkForNamespace returns the NetInfo struct of the active network -// for the given namespace based on the NADs present in that namespace. -// active network here means the network managing this namespace and responsible for -// plumbing all the entities for this namespace -// this is: -// 1) &DefaultNetInfo if there are no NADs in the namespace OR all NADs are Role: "primary" -// 2) &NetConf{Name: ""} if there is exactly ONE NAD with Role: "primary" -// 3) Multiple primary network role NADs ActiveNetworkUnknown error -// 4) error under all other conditions -func GetActiveNetworkForNamespace(namespace string, nadLister nadlister.NetworkAttachmentDefinitionLister) (NetInfo, error) { - if nadLister == nil { - return &DefaultNetInfo{}, nil - } - if !IsNetworkSegmentationSupportEnabled() { - return &DefaultNetInfo{}, nil - } - namespaceNADs, err := nadLister.NetworkAttachmentDefinitions(namespace).List(labels.Everything()) - if err != nil { - return nil, err - } - if len(namespaceNADs) == 0 { - return &DefaultNetInfo{}, nil - } - numberOfPrimaryNetworks := 0 - var primaryNetwork NetInfo - for _, nad := range namespaceNADs { - netInfo, err := ParseNADInfo(nad) - if err != nil { - klog.Warningf("Skipping nad '%s/%s' as active network after failing parsing it with %v", nad.Namespace, nad.Name, err) - continue - } +func NewUnprocessedActiveNetworkError(namespace, udnName string) *UnprocessedActiveNetworkError { + return &UnprocessedActiveNetworkError{namespace: namespace, udnName: udnName} +} - if netInfo.IsPrimaryNetwork() { - primaryNetwork = netInfo - numberOfPrimaryNetworks++ - primaryNetwork.AddNADs(GetNADName(nad.Namespace, nad.Name)) - } +func GetUserDefinedNetworkRole(isPrimary bool) string { + networkRole := types.NetworkRoleSecondary + if isPrimary { + networkRole = types.NetworkRolePrimary } - if numberOfPrimaryNetworks == 1 { - return primaryNetwork, nil - } else if numberOfPrimaryNetworks == 0 { - return &DefaultNetInfo{}, nil + return networkRole +} + +// GenerateExternalIDsForSwitchOrRouter returns the external IDs for logical switches and logical routers +// when it runs on a primary or secondary network. It returns an empty map +// when on the default cluster network, for backward compatibility. 
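// [Editor's note, illustrative sketch, not part of the patch] Expected output of the helper
// below, assuming a primary layer2 network named "tenantblue" (the name is hypothetical):
//
//	ids := GenerateExternalIDsForSwitchOrRouter(netInfo)
//	// ids == map[string]string{
//	//	types.NetworkExternalID:     "tenantblue",
//	//	types.NetworkRoleExternalID: types.NetworkRolePrimary, // via GetUserDefinedNetworkRole(true)
//	//	types.TopologyExternalID:    types.Layer2Topology,
//	// }
//
// and an empty map for the default cluster network, per the backward-compatibility note above.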
+func GenerateExternalIDsForSwitchOrRouter(netInfo NetInfo) map[string]string {
+	externalIDs := make(map[string]string)
+	if netInfo.IsSecondary() {
+		externalIDs[types.NetworkExternalID] = netInfo.GetNetworkName()
+		externalIDs[types.NetworkRoleExternalID] = GetUserDefinedNetworkRole(netInfo.IsPrimaryNetwork())
+		externalIDs[types.TopologyExternalID] = netInfo.TopologyType()
 	}
-	return nil, &UnknownActiveNetworkError{namespace: namespace}
+	return externalIDs
 }

 func GetSecondaryNetworkLogicalPortName(podNamespace, podName, nadName string) string {
@@ -437,6 +419,10 @@ func GetLogicalPortName(podNamespace, podName string) string {
 	return composePortName(podNamespace, podName)
 }

+func GetNamespacePodFromCDNPortName(portName string) (string, string) {
+	return decomposePortName(portName)
+}
+
 func GetSecondaryNetworkIfaceId(podNamespace, podName, nadName string) string {
 	return GetSecondaryNetworkPrefix(nadName) + composePortName(podNamespace, podName)
 }
@@ -455,6 +441,14 @@ func composePortName(podNamespace, podName string) string {
 	return podNamespace + "_" + podName
 }

+func decomposePortName(s string) (string, string) {
+	namespacePod := strings.Split(s, "_")
+	if len(namespacePod) != 2 {
+		return "", ""
+	}
+	return namespacePod[0], namespacePod[1]
+}
+
 func SliceHasStringItem(slice []string, item string) bool {
 	for _, i := range slice {
 		if i == item {
@@ -515,24 +509,121 @@ func IsDefaultEndpointSlice(endpointSlice *discoveryv1.EndpointSlice) bool {
 	return ok
 }

-// GetDefaultEndpointSlicesEventHandler returns an event handler based on the provided handlerFuncs
-// If IsNetworkSegmentationSupportEnabled returns true it returns a handler that filters out the mirrored EndpointSlices.
-// Otherwise, returns handlerFuncs as is.
+// IsEndpointSliceForNetwork checks if the provided EndpointSlice is meant for the given network.
+// If types.LabelUserDefinedEndpointSliceNetwork is set, it is compared to the network name;
+// otherwise the function returns true if the network is the default.
+func IsEndpointSliceForNetwork(endpointSlice *discoveryv1.EndpointSlice, network NetInfo) bool {
+	if endpointSliceNetwork, ok := endpointSlice.Labels[types.LabelUserDefinedEndpointSliceNetwork]; ok {
+		return endpointSliceNetwork == network.GetNetworkName()
+	}
+	return network.IsDefault()
+}
+
 func GetDefaultEndpointSlicesEventHandler(handlerFuncs cache.ResourceEventHandlerFuncs) cache.ResourceEventHandler {
+	return GetEndpointSlicesEventHandlerForNetwork(handlerFuncs, &DefaultNetInfo{})
+}
+
+// GetEndpointSlicesEventHandlerForNetwork returns an event handler based on the provided handlerFuncs and netInfo.
+// On the default network, it returns a handler that filters out the mirrored EndpointSlices; conversely, on
+// a primary network it returns a handler that keeps only the mirrored EndpointSlices and filters out the original ones.
+// Otherwise, it returns handlerFuncs as is.
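// [Editor's note, illustrative sketch, not part of the patch] Minimal wiring, assuming an
// EndpointSlice informer is at hand (epsInformer and netInfo are hypothetical):
//
//	handler := GetEndpointSlicesEventHandlerForNetwork(cache.ResourceEventHandlerFuncs{
//		AddFunc: func(obj interface{}) { /* enqueue the slice for processing */ },
//	}, netInfo)
//	epsInformer.Informer().AddEventHandler(handler)
//
// With segmentation enabled, the default network then sees only slices carrying the
// "kubernetes.io/service-name" label, a primary UDN sees only its mirrored slices,
// and any other network receives the unfiltered handlerFuncs.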
+func GetEndpointSlicesEventHandlerForNetwork(handlerFuncs cache.ResourceEventHandlerFuncs, netInfo NetInfo) cache.ResourceEventHandler {
 	var eventHandler cache.ResourceEventHandler
 	eventHandler = handlerFuncs
-	if IsNetworkSegmentationSupportEnabled() {
-		// Filter out objects without the default serviceName label to exclude mirrored EndpointSlices
+	if !IsNetworkSegmentationSupportEnabled() {
+		return eventHandler
+	}
+
+	var filterFunc func(obj interface{}) bool
+
+	if netInfo.IsDefault() {
+		// Filter out objects without the "kubernetes.io/service-name" label to exclude mirrored EndpointSlices
+		filterFunc = func(obj interface{}) bool {
+			if endpointSlice, ok := obj.(*discoveryv1.EndpointSlice); ok {
+				return IsDefaultEndpointSlice(endpointSlice)
+			}
+			klog.Errorf("Failed to cast the object to *discovery.EndpointSlice: %v", obj)
+			return true
+		}
+
+	} else if netInfo.IsPrimaryNetwork() {
+		// Only consider mirrored endpointslices for the given network
+		filterFunc = func(obj interface{}) bool {
+			if endpointSlice, ok := obj.(*discoveryv1.EndpointSlice); ok {
+				isDefault := IsDefaultEndpointSlice(endpointSlice)
+				isForThisNetwork := IsEndpointSliceForNetwork(endpointSlice, netInfo)
+				return !isDefault && isForThisNetwork
+			}
+			klog.Errorf("Failed to cast the object to *discovery.EndpointSlice: %v", obj)
+			return true
+		}
+	}
+	if filterFunc != nil {
 		eventHandler = cache.FilteringResourceEventHandler{
-			FilterFunc: func(obj interface{}) bool {
-				if endpointSlice, ok := obj.(*discoveryv1.EndpointSlice); ok {
-					return IsDefaultEndpointSlice(endpointSlice)
-				}
-				klog.Errorf("Failed to cast the object to *discovery.EndpointSlice: %v", obj)
-				return true
-			},
-			Handler: handlerFuncs,
+			FilterFunc: filterFunc,
+			Handler:    handlerFuncs,
 		}
 	}
+
 	return eventHandler
 }
+
+// GetEndpointSlicesBySelector returns a list of EndpointSlices in a given namespace by the label selector
+func GetEndpointSlicesBySelector(namespace string, labelSelector metav1.LabelSelector, endpointSliceLister discoverylisters.EndpointSliceLister) ([]*discoveryv1.EndpointSlice, error) {
+	selector, err := metav1.LabelSelectorAsSelector(&labelSelector)
+	if err != nil {
+		return nil, err
+	}
+	return endpointSliceLister.EndpointSlices(namespace).List(selector)
+}
+
+// GetServiceEndpointSlices returns the EndpointSlices associated with a service for the specified network.
+// If network is DefaultNetworkName, the default EndpointSlices are returned; otherwise the function looks
+// for the mirrored EndpointSlices of the specified network.
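// [Editor's note, illustrative, not part of the patch] The two selector shapes this resolves
// to, shown with hypothetical values (svcName "web", network "tenantblue"):
//
//	default network: {discovery.LabelServiceName: "web"}                    // kubernetes.io/service-name
//	other networks:  {types.LabelUserDefinedServiceName: "web",
//	                  types.LabelUserDefinedEndpointSliceNetwork: "tenantblue"}
//
// so a lookup reads: slices, err := GetServiceEndpointSlices("ns1", "web", "tenantblue", epsLister)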
+func GetServiceEndpointSlices(namespace, svcName, network string, endpointSliceLister discoverylisters.EndpointSliceLister) ([]*discovery.EndpointSlice, error) { + var selector metav1.LabelSelector + if network == types.DefaultNetworkName { + selector = metav1.LabelSelector{MatchLabels: map[string]string{ + discovery.LabelServiceName: svcName, + }} + } else { + selector = metav1.LabelSelector{MatchLabels: map[string]string{ + types.LabelUserDefinedServiceName: svcName, + types.LabelUserDefinedEndpointSliceNetwork: network, + }} + } + return GetEndpointSlicesBySelector(namespace, selector, endpointSliceLister) +} + +// IsUDNEnabledService checks whether the provided namespaced name key is a UDN enabled service specified in config.Default.UDNAllowedDefaultServices +func IsUDNEnabledService(key string) bool { + for _, enabledService := range config.Default.UDNAllowedDefaultServices { + if enabledService == key { + return true + } + } + return false +} + +// ServiceFromEndpointSlice returns the namespaced name of the service that corresponds to the given endpointSlice +// in the given network. If the service label is missing the returned namespaced name and the error are nil. +func ServiceFromEndpointSlice(eps *discovery.EndpointSlice, netInfo NetInfo) (*k8stypes.NamespacedName, error) { + labelKey := discovery.LabelServiceName + if netInfo.IsPrimaryNetwork() { + if eps.Labels[types.LabelUserDefinedEndpointSliceNetwork] != netInfo.GetNetworkName() { + return nil, fmt.Errorf("endpointslice %s/%s does not belong to %s network", eps.Namespace, eps.Name, netInfo.GetNetworkName()) + } + labelKey = types.LabelUserDefinedServiceName + } + svcName, found := eps.Labels[labelKey] + if !found { + return nil, nil + } + + if svcName == "" { + return nil, fmt.Errorf("endpointslice %s/%s has empty svcName for label %s in network %s", + eps.Namespace, eps.Name, labelKey, netInfo.GetNetworkName()) + } + + return &k8stypes.NamespacedName{Namespace: eps.Namespace, Name: svcName}, nil +} diff --git a/go-controller/pkg/util/util_suite_test.go b/go-controller/pkg/util/util_suite_test.go index 261030c018..c751f455a0 100644 --- a/go-controller/pkg/util/util_suite_test.go +++ b/go-controller/pkg/util/util_suite_test.go @@ -3,7 +3,7 @@ package util import ( "testing" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) diff --git a/go-controller/pkg/util/util_unit_test.go b/go-controller/pkg/util/util_unit_test.go index 1bc19e3353..658c945a34 100644 --- a/go-controller/pkg/util/util_unit_test.go +++ b/go-controller/pkg/util/util_unit_test.go @@ -9,18 +9,18 @@ import ( "strconv" "testing" - nadapi "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + cnitypes "github.com/containernetworking/cni/pkg/types" + "github.com/stretchr/testify/assert" + discovery "k8s.io/api/discovery/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8stypes "k8s.io/apimachinery/pkg/types" + + ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" - ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/sets" - - v1nadmocks 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/mocks/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" mock_k8s_io_utils_exec "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/mocks/k8s.io/utils/exec" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/mocks" - "github.com/stretchr/testify/assert" ) func TestGetLegacyK8sMgmtIntfName(t *testing.T) { @@ -247,123 +247,136 @@ func TestFilterIPsSlice(t *testing.T) { } } -func TestGetActiveNetworkForNamespace(t *testing.T) { +func TestGenerateId(t *testing.T) { + id := GenerateId(10) + assert.Equal(t, 10, len(id)) + matchesPattern, _ := regexp.MatchString("([a-zA-Z0-9-]*)", id) + assert.True(t, matchesPattern) +} - config.OVNKubernetesFeature.EnableMultiNetwork = true - config.OVNKubernetesFeature.EnableNetworkSegmentation = true +func TestGetNetworkScopedK8sMgmtHostIntfName(t *testing.T) { + intfName := GetNetworkScopedK8sMgmtHostIntfName(1245678) + assert.Equal(t, "ovn-k8s-mp12456", intfName) +} + +func TestServiceFromEndpointSlice(t *testing.T) { + config.IPv4Mode = true + type args struct { + eps *discovery.EndpointSlice + netInfo NetInfo + } + netInfo, _ := NewNetInfo( + &ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{Name: "primary-network"}, + Topology: types.Layer3Topology, + Subnets: "10.1.130.0/16/24", + Role: types.NetworkRolePrimary, + }) + defaultNetInfo, _ := NewNetInfo( + &ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{Name: types.DefaultNetworkName}, + }) var tests = []struct { - name string - nads []*nadapi.NetworkAttachmentDefinition - namespace string - expectedActiveNetwork NetInfo - expectedErr error + name string + args args + want *k8stypes.NamespacedName + wantErr assert.ErrorAssertionFunc }{ { - name: "more than 1 primary NAD found in provided namespace", - nads: []*nadapi.NetworkAttachmentDefinition{ - ovntest.GenerateNAD("surya", "miguel", "default", - types.Layer3Topology, "100.128.0.0/16", types.NetworkRolePrimary), - ovntest.GenerateNAD("surya", "miguel", "default", - types.Layer2Topology, "10.100.200.0/24", types.NetworkRolePrimary), + name: "Primary network with matching label", + args: args{ + eps: &discovery.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + Name: "test-eps", + Labels: map[string]string{ + types.LabelUserDefinedEndpointSliceNetwork: "primary-network", + types.LabelUserDefinedServiceName: "test-service", + }, + }, + }, + netInfo: netInfo, }, - expectedErr: &UnknownActiveNetworkError{namespace: "default"}, - namespace: "default", - expectedActiveNetwork: nil, + want: &k8stypes.NamespacedName{ + Namespace: "test-namespace", + Name: "test-service", + }, + wantErr: assert.NoError, }, { - name: "0 NADs found in the provided namespace", - nads: []*nadapi.NetworkAttachmentDefinition{ - ovntest.GenerateNAD("surya", "quique", "ns1", - types.Layer3Topology, "100.128.0.0/16", types.NetworkRolePrimary), - ovntest.GenerateNAD("surya", "quique", "ns2", - types.Layer2Topology, "10.100.200.0/24", types.NetworkRoleSecondary), + name: "Wrong primary network with matching label", + args: args{ + eps: &discovery.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + Name: "test-eps", + Labels: map[string]string{ + types.LabelUserDefinedEndpointSliceNetwork: "wrong-network", + types.LabelUserDefinedServiceName: "test-service", + }, + }, + }, + netInfo: netInfo, }, - expectedErr: nil, - namespace: "default", - 
expectedActiveNetwork: &DefaultNetInfo{}, + want: nil, + wantErr: assert.Error, }, { - name: "exactly 1 primary NAD found in the provided namespace", - nads: []*nadapi.NetworkAttachmentDefinition{ - ovntest.GenerateNAD("surya", "quique", "ns1", - types.Layer3Topology, "100.128.0.0/16", types.NetworkRolePrimary), - ovntest.GenerateNAD("surya", "quique1", "ns1", - types.Layer2Topology, "10.100.200.0/24", types.NetworkRoleSecondary), - ovntest.GenerateNADWithConfig("quique2", "ns1", ` -{ - "cniVersion": "whocares", - "nme": bad, - "typ": bad, -} -`), - }, - expectedErr: nil, - namespace: "ns1", - expectedActiveNetwork: &secondaryNetInfo{ - netName: "surya", - primaryNetwork: true, - topology: "layer3", - nadNames: sets.New("ns1/quique"), - mtu: 1300, - ipv4mode: true, - subnets: []config.CIDRNetworkEntry{{ - CIDR: ovntest.MustParseIPNet("100.128.0.0/16"), - HostSubnetLength: 24, - }}, - joinSubnets: []*net.IPNet{ - ovntest.MustParseIPNet(ovntypes.UserDefinedPrimaryNetworkJoinSubnetV4), - ovntest.MustParseIPNet(ovntypes.UserDefinedPrimaryNetworkJoinSubnetV6), + name: "Primary network with no service label set", + args: args{ + eps: &discovery.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + Name: "test-eps", + Labels: map[string]string{ + types.LabelUserDefinedEndpointSliceNetwork: "primary-network", + }, + }, }, + netInfo: netInfo, }, + want: nil, + wantErr: assert.NoError, }, { - name: "no NADs found in provided namespace", - nads: []*nadapi.NetworkAttachmentDefinition{}, - expectedErr: nil, - namespace: "default", - expectedActiveNetwork: &DefaultNetInfo{}, + name: "default network with a service label set", + args: args{ + eps: &discovery.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + Name: "test-eps", + Labels: map[string]string{ + discovery.LabelServiceName: "test-service", + }, + }, + }, + netInfo: defaultNetInfo, + }, + want: &k8stypes.NamespacedName{Namespace: "test-namespace", Name: "test-service"}, + wantErr: assert.NoError, }, { - name: "no primary NADs found in the provided namespace", - nads: []*nadapi.NetworkAttachmentDefinition{ - ovntest.GenerateNAD("quique", "miguel", "default", - types.Layer3Topology, "100.128.0.0/16/24", types.NetworkRoleSecondary), - ovntest.GenerateNAD("quique", "miguel", "default", - types.Layer2Topology, "10.100.200.0/24", types.NetworkRoleSecondary), + name: "default network with no service label set", + args: args{ + eps: &discovery.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + Name: "test-eps", + }, + }, + netInfo: defaultNetInfo, }, - expectedErr: nil, - namespace: "default", - expectedActiveNetwork: &DefaultNetInfo{}, + want: nil, + wantErr: assert.NoError, }, } - - for i, tc := range tests { - t.Run(strconv.Itoa(i), func(t *testing.T) { - nadLister := v1nadmocks.NetworkAttachmentDefinitionLister{} - nadNamespaceLister := v1nadmocks.NetworkAttachmentDefinitionNamespaceLister{} - nadLister.On("NetworkAttachmentDefinitions", tc.namespace).Return(&nadNamespaceLister) - mockedNADs := []*nadapi.NetworkAttachmentDefinition{} - for _, nad := range tc.nads { - if nad.Namespace == tc.namespace { // need to hack this in tests given its hard to simulate listers - mockedNADs = append(mockedNADs, nad) - } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ServiceFromEndpointSlice(tt.args.eps, tt.args.netInfo) + if !tt.wantErr(t, err, fmt.Sprintf("ServiceFromEndpointSlice(%v, %v)", tt.args.eps, tt.args.netInfo)) { + return } - 
nadNamespaceLister.On("List", labels.Everything()).Return(mockedNADs, nil) - activeNetwork, err := GetActiveNetworkForNamespace(tc.namespace, &nadLister) - assert.Equal(t, tc.expectedErr, err) - assert.Equal(t, tc.expectedActiveNetwork, activeNetwork) + assert.Equalf(t, tt.want, got, "ServiceFromEndpointSlice(%v, %v)", tt.args.eps, tt.args.netInfo) }) } } - -func TestGenerateId(t *testing.T) { - id := GenerateId(10) - assert.Equal(t, 10, len(id)) - matchesPattern, _ := regexp.MatchString("([a-zA-Z0-9-]*)", id) - assert.True(t, matchesPattern) -} - -func TestGetNetworkScopedK8sMgmtHostIntfName(t *testing.T) { - intfName := GetNetworkScopedK8sMgmtHostIntfName(1245678) - assert.Equal(t, "ovn-k8s-mp12456", intfName) -} diff --git a/go-controller/pkg/vswitchd/autoattach.go b/go-controller/pkg/vswitchd/autoattach.go new file mode 100644 index 0000000000..b9655736aa --- /dev/null +++ b/go-controller/pkg/vswitchd/autoattach.go @@ -0,0 +1,93 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const AutoAttachTable = "AutoAttach" + +// AutoAttach defines an object in AutoAttach table +type AutoAttach struct { + UUID string `ovsdb:"_uuid"` + Mappings map[int]int `ovsdb:"mappings"` + SystemDescription string `ovsdb:"system_description"` + SystemName string `ovsdb:"system_name"` +} + +func (a *AutoAttach) GetUUID() string { + return a.UUID +} + +func (a *AutoAttach) GetMappings() map[int]int { + return a.Mappings +} + +func copyAutoAttachMappings(a map[int]int) map[int]int { + if a == nil { + return nil + } + b := make(map[int]int, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalAutoAttachMappings(a, b map[int]int) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *AutoAttach) GetSystemDescription() string { + return a.SystemDescription +} + +func (a *AutoAttach) GetSystemName() string { + return a.SystemName +} + +func (a *AutoAttach) DeepCopyInto(b *AutoAttach) { + *b = *a + b.Mappings = copyAutoAttachMappings(a.Mappings) +} + +func (a *AutoAttach) DeepCopy() *AutoAttach { + b := new(AutoAttach) + a.DeepCopyInto(b) + return b +} + +func (a *AutoAttach) CloneModelInto(b model.Model) { + c := b.(*AutoAttach) + a.DeepCopyInto(c) +} + +func (a *AutoAttach) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *AutoAttach) Equals(b *AutoAttach) bool { + return a.UUID == b.UUID && + equalAutoAttachMappings(a.Mappings, b.Mappings) && + a.SystemDescription == b.SystemDescription && + a.SystemName == b.SystemName +} + +func (a *AutoAttach) EqualsModel(b model.Model) bool { + c := b.(*AutoAttach) + return a.Equals(c) +} + +var _ model.CloneableModel = &AutoAttach{} +var _ model.ComparableModel = &AutoAttach{} diff --git a/go-controller/pkg/vswitchd/bridge.go b/go-controller/pkg/vswitchd/bridge.go new file mode 100644 index 0000000000..8953faa3f2 --- /dev/null +++ b/go-controller/pkg/vswitchd/bridge.go @@ -0,0 +1,570 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
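// [Editor's note, illustrative sketch, not part of the patch] The vswitchd files below are
// modelgen output: one typed struct per OVSDB table in the Open_vSwitch schema, plus
// per-column copy/equal helpers. A libovsdb client would typically register them in a
// ClientDBModel, roughly (error handling omitted):
//
//	dbModel, _ := model.NewClientDBModel("Open_vSwitch",
//		map[string]model.Model{vswitchd.BridgeTable: &vswitchd.Bridge{}})
//
// The CloneModel/EqualsModel methods are what the libovsdb cache uses to copy and compare rows.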
+ +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const BridgeTable = "Bridge" + +type ( + BridgeFailMode = string + BridgeProtocols = string +) + +var ( + BridgeFailModeStandalone BridgeFailMode = "standalone" + BridgeFailModeSecure BridgeFailMode = "secure" + BridgeProtocolsOpenflow10 BridgeProtocols = "OpenFlow10" + BridgeProtocolsOpenflow11 BridgeProtocols = "OpenFlow11" + BridgeProtocolsOpenflow12 BridgeProtocols = "OpenFlow12" + BridgeProtocolsOpenflow13 BridgeProtocols = "OpenFlow13" + BridgeProtocolsOpenflow14 BridgeProtocols = "OpenFlow14" + BridgeProtocolsOpenflow15 BridgeProtocols = "OpenFlow15" +) + +// Bridge defines an object in Bridge table +type Bridge struct { + UUID string `ovsdb:"_uuid"` + AutoAttach *string `ovsdb:"auto_attach"` + Controller []string `ovsdb:"controller"` + DatapathID *string `ovsdb:"datapath_id"` + DatapathType string `ovsdb:"datapath_type"` + DatapathVersion string `ovsdb:"datapath_version"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + FailMode *BridgeFailMode `ovsdb:"fail_mode"` + FloodVLANs []int `ovsdb:"flood_vlans"` + FlowTables map[int]string `ovsdb:"flow_tables"` + IPFIX *string `ovsdb:"ipfix"` + McastSnoopingEnable bool `ovsdb:"mcast_snooping_enable"` + Mirrors []string `ovsdb:"mirrors"` + Name string `ovsdb:"name"` + Netflow *string `ovsdb:"netflow"` + OtherConfig map[string]string `ovsdb:"other_config"` + Ports []string `ovsdb:"ports"` + Protocols []BridgeProtocols `ovsdb:"protocols"` + RSTPEnable bool `ovsdb:"rstp_enable"` + RSTPStatus map[string]string `ovsdb:"rstp_status"` + Sflow *string `ovsdb:"sflow"` + Status map[string]string `ovsdb:"status"` + STPEnable bool `ovsdb:"stp_enable"` +} + +func (a *Bridge) GetUUID() string { + return a.UUID +} + +func (a *Bridge) GetAutoAttach() *string { + return a.AutoAttach +} + +func copyBridgeAutoAttach(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBridgeAutoAttach(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Bridge) GetController() []string { + return a.Controller +} + +func copyBridgeController(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalBridgeController(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Bridge) GetDatapathID() *string { + return a.DatapathID +} + +func copyBridgeDatapathID(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBridgeDatapathID(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Bridge) GetDatapathType() string { + return a.DatapathType +} + +func (a *Bridge) GetDatapathVersion() string { + return a.DatapathVersion +} + +func (a *Bridge) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyBridgeExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBridgeExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + 
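// [Editor's note, not part of the patch] All the generated equal* helpers share this shape:
// the (a == nil) != (b == nil) guard makes a nil map/slice and an empty one compare as
// unequal, which is worth remembering when comparing hand-built rows against cached ones;
// leave unset optional columns nil rather than allocating empty values.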
+func (a *Bridge) GetFailMode() *BridgeFailMode { + return a.FailMode +} + +func copyBridgeFailMode(a *BridgeFailMode) *BridgeFailMode { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBridgeFailMode(a, b *BridgeFailMode) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Bridge) GetFloodVLANs() []int { + return a.FloodVLANs +} + +func copyBridgeFloodVLANs(a []int) []int { + if a == nil { + return nil + } + b := make([]int, len(a)) + copy(b, a) + return b +} + +func equalBridgeFloodVLANs(a, b []int) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Bridge) GetFlowTables() map[int]string { + return a.FlowTables +} + +func copyBridgeFlowTables(a map[int]string) map[int]string { + if a == nil { + return nil + } + b := make(map[int]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBridgeFlowTables(a, b map[int]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Bridge) GetIPFIX() *string { + return a.IPFIX +} + +func copyBridgeIPFIX(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBridgeIPFIX(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Bridge) GetMcastSnoopingEnable() bool { + return a.McastSnoopingEnable +} + +func (a *Bridge) GetMirrors() []string { + return a.Mirrors +} + +func copyBridgeMirrors(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalBridgeMirrors(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Bridge) GetName() string { + return a.Name +} + +func (a *Bridge) GetNetflow() *string { + return a.Netflow +} + +func copyBridgeNetflow(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBridgeNetflow(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Bridge) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyBridgeOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBridgeOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Bridge) GetPorts() []string { + return a.Ports +} + +func copyBridgePorts(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalBridgePorts(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Bridge) GetProtocols() []BridgeProtocols { + return a.Protocols +} + +func 
copyBridgeProtocols(a []BridgeProtocols) []BridgeProtocols { + if a == nil { + return nil + } + b := make([]BridgeProtocols, len(a)) + copy(b, a) + return b +} + +func equalBridgeProtocols(a, b []BridgeProtocols) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Bridge) GetRSTPEnable() bool { + return a.RSTPEnable +} + +func (a *Bridge) GetRSTPStatus() map[string]string { + return a.RSTPStatus +} + +func copyBridgeRSTPStatus(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBridgeRSTPStatus(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Bridge) GetSflow() *string { + return a.Sflow +} + +func copyBridgeSflow(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBridgeSflow(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Bridge) GetStatus() map[string]string { + return a.Status +} + +func copyBridgeStatus(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBridgeStatus(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Bridge) GetSTPEnable() bool { + return a.STPEnable +} + +func (a *Bridge) DeepCopyInto(b *Bridge) { + *b = *a + b.AutoAttach = copyBridgeAutoAttach(a.AutoAttach) + b.Controller = copyBridgeController(a.Controller) + b.DatapathID = copyBridgeDatapathID(a.DatapathID) + b.ExternalIDs = copyBridgeExternalIDs(a.ExternalIDs) + b.FailMode = copyBridgeFailMode(a.FailMode) + b.FloodVLANs = copyBridgeFloodVLANs(a.FloodVLANs) + b.FlowTables = copyBridgeFlowTables(a.FlowTables) + b.IPFIX = copyBridgeIPFIX(a.IPFIX) + b.Mirrors = copyBridgeMirrors(a.Mirrors) + b.Netflow = copyBridgeNetflow(a.Netflow) + b.OtherConfig = copyBridgeOtherConfig(a.OtherConfig) + b.Ports = copyBridgePorts(a.Ports) + b.Protocols = copyBridgeProtocols(a.Protocols) + b.RSTPStatus = copyBridgeRSTPStatus(a.RSTPStatus) + b.Sflow = copyBridgeSflow(a.Sflow) + b.Status = copyBridgeStatus(a.Status) +} + +func (a *Bridge) DeepCopy() *Bridge { + b := new(Bridge) + a.DeepCopyInto(b) + return b +} + +func (a *Bridge) CloneModelInto(b model.Model) { + c := b.(*Bridge) + a.DeepCopyInto(c) +} + +func (a *Bridge) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Bridge) Equals(b *Bridge) bool { + return a.UUID == b.UUID && + equalBridgeAutoAttach(a.AutoAttach, b.AutoAttach) && + equalBridgeController(a.Controller, b.Controller) && + equalBridgeDatapathID(a.DatapathID, b.DatapathID) && + a.DatapathType == b.DatapathType && + a.DatapathVersion == b.DatapathVersion && + equalBridgeExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalBridgeFailMode(a.FailMode, b.FailMode) && + equalBridgeFloodVLANs(a.FloodVLANs, b.FloodVLANs) && + equalBridgeFlowTables(a.FlowTables, b.FlowTables) && + equalBridgeIPFIX(a.IPFIX, b.IPFIX) && + 
a.McastSnoopingEnable == b.McastSnoopingEnable && + equalBridgeMirrors(a.Mirrors, b.Mirrors) && + a.Name == b.Name && + equalBridgeNetflow(a.Netflow, b.Netflow) && + equalBridgeOtherConfig(a.OtherConfig, b.OtherConfig) && + equalBridgePorts(a.Ports, b.Ports) && + equalBridgeProtocols(a.Protocols, b.Protocols) && + a.RSTPEnable == b.RSTPEnable && + equalBridgeRSTPStatus(a.RSTPStatus, b.RSTPStatus) && + equalBridgeSflow(a.Sflow, b.Sflow) && + equalBridgeStatus(a.Status, b.Status) && + a.STPEnable == b.STPEnable +} + +func (a *Bridge) EqualsModel(b model.Model) bool { + c := b.(*Bridge) + return a.Equals(c) +} + +var _ model.CloneableModel = &Bridge{} +var _ model.ComparableModel = &Bridge{} diff --git a/go-controller/pkg/vswitchd/controller.go b/go-controller/pkg/vswitchd/controller.go new file mode 100644 index 0000000000..1b38c989bf --- /dev/null +++ b/go-controller/pkg/vswitchd/controller.go @@ -0,0 +1,475 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const ControllerTable = "Controller" + +type ( + ControllerConnectionMode = string + ControllerRole = string + ControllerType = string +) + +var ( + ControllerConnectionModeInBand ControllerConnectionMode = "in-band" + ControllerConnectionModeOutOfBand ControllerConnectionMode = "out-of-band" + ControllerRoleOther ControllerRole = "other" + ControllerRoleMaster ControllerRole = "master" + ControllerRoleSlave ControllerRole = "slave" + ControllerTypePrimary ControllerType = "primary" + ControllerTypeService ControllerType = "service" +) + +// Controller defines an object in Controller table +type Controller struct { + UUID string `ovsdb:"_uuid"` + ConnectionMode *ControllerConnectionMode `ovsdb:"connection_mode"` + ControllerBurstLimit *int `ovsdb:"controller_burst_limit"` + ControllerQueueSize *int `ovsdb:"controller_queue_size"` + ControllerRateLimit *int `ovsdb:"controller_rate_limit"` + EnableAsyncMessages *bool `ovsdb:"enable_async_messages"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + InactivityProbe *int `ovsdb:"inactivity_probe"` + IsConnected bool `ovsdb:"is_connected"` + LocalGateway *string `ovsdb:"local_gateway"` + LocalIP *string `ovsdb:"local_ip"` + LocalNetmask *string `ovsdb:"local_netmask"` + MaxBackoff *int `ovsdb:"max_backoff"` + OtherConfig map[string]string `ovsdb:"other_config"` + Role *ControllerRole `ovsdb:"role"` + Status map[string]string `ovsdb:"status"` + Target string `ovsdb:"target"` + Type *ControllerType `ovsdb:"type"` +} + +func (a *Controller) GetUUID() string { + return a.UUID +} + +func (a *Controller) GetConnectionMode() *ControllerConnectionMode { + return a.ConnectionMode +} + +func copyControllerConnectionMode(a *ControllerConnectionMode) *ControllerConnectionMode { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalControllerConnectionMode(a, b *ControllerConnectionMode) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Controller) GetControllerBurstLimit() *int { + return a.ControllerBurstLimit +} + +func copyControllerControllerBurstLimit(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalControllerControllerBurstLimit(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Controller) GetControllerQueueSize() *int { + return a.ControllerQueueSize +} + +func copyControllerControllerQueueSize(a *int) 
*int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalControllerControllerQueueSize(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Controller) GetControllerRateLimit() *int { + return a.ControllerRateLimit +} + +func copyControllerControllerRateLimit(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalControllerControllerRateLimit(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Controller) GetEnableAsyncMessages() *bool { + return a.EnableAsyncMessages +} + +func copyControllerEnableAsyncMessages(a *bool) *bool { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalControllerEnableAsyncMessages(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Controller) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyControllerExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalControllerExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Controller) GetInactivityProbe() *int { + return a.InactivityProbe +} + +func copyControllerInactivityProbe(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalControllerInactivityProbe(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Controller) GetIsConnected() bool { + return a.IsConnected +} + +func (a *Controller) GetLocalGateway() *string { + return a.LocalGateway +} + +func copyControllerLocalGateway(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalControllerLocalGateway(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Controller) GetLocalIP() *string { + return a.LocalIP +} + +func copyControllerLocalIP(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalControllerLocalIP(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Controller) GetLocalNetmask() *string { + return a.LocalNetmask +} + +func copyControllerLocalNetmask(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalControllerLocalNetmask(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Controller) GetMaxBackoff() *int { + return a.MaxBackoff +} + +func copyControllerMaxBackoff(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalControllerMaxBackoff(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Controller) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyControllerOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, 
len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalControllerOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Controller) GetRole() *ControllerRole { + return a.Role +} + +func copyControllerRole(a *ControllerRole) *ControllerRole { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalControllerRole(a, b *ControllerRole) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Controller) GetStatus() map[string]string { + return a.Status +} + +func copyControllerStatus(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalControllerStatus(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Controller) GetTarget() string { + return a.Target +} + +func (a *Controller) GetType() *ControllerType { + return a.Type +} + +func copyControllerType(a *ControllerType) *ControllerType { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalControllerType(a, b *ControllerType) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Controller) DeepCopyInto(b *Controller) { + *b = *a + b.ConnectionMode = copyControllerConnectionMode(a.ConnectionMode) + b.ControllerBurstLimit = copyControllerControllerBurstLimit(a.ControllerBurstLimit) + b.ControllerQueueSize = copyControllerControllerQueueSize(a.ControllerQueueSize) + b.ControllerRateLimit = copyControllerControllerRateLimit(a.ControllerRateLimit) + b.EnableAsyncMessages = copyControllerEnableAsyncMessages(a.EnableAsyncMessages) + b.ExternalIDs = copyControllerExternalIDs(a.ExternalIDs) + b.InactivityProbe = copyControllerInactivityProbe(a.InactivityProbe) + b.LocalGateway = copyControllerLocalGateway(a.LocalGateway) + b.LocalIP = copyControllerLocalIP(a.LocalIP) + b.LocalNetmask = copyControllerLocalNetmask(a.LocalNetmask) + b.MaxBackoff = copyControllerMaxBackoff(a.MaxBackoff) + b.OtherConfig = copyControllerOtherConfig(a.OtherConfig) + b.Role = copyControllerRole(a.Role) + b.Status = copyControllerStatus(a.Status) + b.Type = copyControllerType(a.Type) +} + +func (a *Controller) DeepCopy() *Controller { + b := new(Controller) + a.DeepCopyInto(b) + return b +} + +func (a *Controller) CloneModelInto(b model.Model) { + c := b.(*Controller) + a.DeepCopyInto(c) +} + +func (a *Controller) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Controller) Equals(b *Controller) bool { + return a.UUID == b.UUID && + equalControllerConnectionMode(a.ConnectionMode, b.ConnectionMode) && + equalControllerControllerBurstLimit(a.ControllerBurstLimit, b.ControllerBurstLimit) && + equalControllerControllerQueueSize(a.ControllerQueueSize, b.ControllerQueueSize) && + equalControllerControllerRateLimit(a.ControllerRateLimit, b.ControllerRateLimit) && + equalControllerEnableAsyncMessages(a.EnableAsyncMessages, b.EnableAsyncMessages) && + equalControllerExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalControllerInactivityProbe(a.InactivityProbe, b.InactivityProbe) && + 
a.IsConnected == b.IsConnected && + equalControllerLocalGateway(a.LocalGateway, b.LocalGateway) && + equalControllerLocalIP(a.LocalIP, b.LocalIP) && + equalControllerLocalNetmask(a.LocalNetmask, b.LocalNetmask) && + equalControllerMaxBackoff(a.MaxBackoff, b.MaxBackoff) && + equalControllerOtherConfig(a.OtherConfig, b.OtherConfig) && + equalControllerRole(a.Role, b.Role) && + equalControllerStatus(a.Status, b.Status) && + a.Target == b.Target && + equalControllerType(a.Type, b.Type) +} + +func (a *Controller) EqualsModel(b model.Model) bool { + c := b.(*Controller) + return a.Equals(c) +} + +var _ model.CloneableModel = &Controller{} +var _ model.ComparableModel = &Controller{} diff --git a/go-controller/pkg/vswitchd/ct_timeout_policy.go b/go-controller/pkg/vswitchd/ct_timeout_policy.go new file mode 100644 index 0000000000..98bf690498 --- /dev/null +++ b/go-controller/pkg/vswitchd/ct_timeout_policy.go @@ -0,0 +1,137 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const CTTimeoutPolicyTable = "CT_Timeout_Policy" + +type ( + CTTimeoutPolicyTimeouts = string +) + +var ( + CTTimeoutPolicyTimeoutsTCPSynSent CTTimeoutPolicyTimeouts = "tcp_syn_sent" + CTTimeoutPolicyTimeoutsTCPSynRecv CTTimeoutPolicyTimeouts = "tcp_syn_recv" + CTTimeoutPolicyTimeoutsTCPEstablished CTTimeoutPolicyTimeouts = "tcp_established" + CTTimeoutPolicyTimeoutsTCPFinWait CTTimeoutPolicyTimeouts = "tcp_fin_wait" + CTTimeoutPolicyTimeoutsTCPCloseWait CTTimeoutPolicyTimeouts = "tcp_close_wait" + CTTimeoutPolicyTimeoutsTCPLastAck CTTimeoutPolicyTimeouts = "tcp_last_ack" + CTTimeoutPolicyTimeoutsTCPTimeWait CTTimeoutPolicyTimeouts = "tcp_time_wait" + CTTimeoutPolicyTimeoutsTCPClose CTTimeoutPolicyTimeouts = "tcp_close" + CTTimeoutPolicyTimeoutsTCPSynSent2 CTTimeoutPolicyTimeouts = "tcp_syn_sent2" + CTTimeoutPolicyTimeoutsTCPRetransmit CTTimeoutPolicyTimeouts = "tcp_retransmit" + CTTimeoutPolicyTimeoutsTCPUnack CTTimeoutPolicyTimeouts = "tcp_unack" + CTTimeoutPolicyTimeoutsUDPFirst CTTimeoutPolicyTimeouts = "udp_first" + CTTimeoutPolicyTimeoutsUDPSingle CTTimeoutPolicyTimeouts = "udp_single" + CTTimeoutPolicyTimeoutsUDPMultiple CTTimeoutPolicyTimeouts = "udp_multiple" + CTTimeoutPolicyTimeoutsICMPFirst CTTimeoutPolicyTimeouts = "icmp_first" + CTTimeoutPolicyTimeoutsICMPReply CTTimeoutPolicyTimeouts = "icmp_reply" +) + +// CTTimeoutPolicy defines an object in CT_Timeout_Policy table +type CTTimeoutPolicy struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Timeouts map[string]int `ovsdb:"timeouts"` +} + +func (a *CTTimeoutPolicy) GetUUID() string { + return a.UUID +} + +func (a *CTTimeoutPolicy) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyCTTimeoutPolicyExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalCTTimeoutPolicyExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *CTTimeoutPolicy) GetTimeouts() map[string]int { + return a.Timeouts +} + +func copyCTTimeoutPolicyTimeouts(a map[string]int) map[string]int { + if a == nil { + return nil + } + b := make(map[string]int, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func 
equalCTTimeoutPolicyTimeouts(a, b map[string]int) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *CTTimeoutPolicy) DeepCopyInto(b *CTTimeoutPolicy) { + *b = *a + b.ExternalIDs = copyCTTimeoutPolicyExternalIDs(a.ExternalIDs) + b.Timeouts = copyCTTimeoutPolicyTimeouts(a.Timeouts) +} + +func (a *CTTimeoutPolicy) DeepCopy() *CTTimeoutPolicy { + b := new(CTTimeoutPolicy) + a.DeepCopyInto(b) + return b +} + +func (a *CTTimeoutPolicy) CloneModelInto(b model.Model) { + c := b.(*CTTimeoutPolicy) + a.DeepCopyInto(c) +} + +func (a *CTTimeoutPolicy) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *CTTimeoutPolicy) Equals(b *CTTimeoutPolicy) bool { + return a.UUID == b.UUID && + equalCTTimeoutPolicyExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalCTTimeoutPolicyTimeouts(a.Timeouts, b.Timeouts) +} + +func (a *CTTimeoutPolicy) EqualsModel(b model.Model) bool { + c := b.(*CTTimeoutPolicy) + return a.Equals(c) +} + +var _ model.CloneableModel = &CTTimeoutPolicy{} +var _ model.ComparableModel = &CTTimeoutPolicy{} diff --git a/go-controller/pkg/vswitchd/ct_zone.go b/go-controller/pkg/vswitchd/ct_zone.go new file mode 100644 index 0000000000..4eaba845c4 --- /dev/null +++ b/go-controller/pkg/vswitchd/ct_zone.go @@ -0,0 +1,106 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const CTZoneTable = "CT_Zone" + +// CTZone defines an object in CT_Zone table +type CTZone struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + TimeoutPolicy *string `ovsdb:"timeout_policy"` +} + +func (a *CTZone) GetUUID() string { + return a.UUID +} + +func (a *CTZone) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyCTZoneExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalCTZoneExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *CTZone) GetTimeoutPolicy() *string { + return a.TimeoutPolicy +} + +func copyCTZoneTimeoutPolicy(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalCTZoneTimeoutPolicy(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *CTZone) DeepCopyInto(b *CTZone) { + *b = *a + b.ExternalIDs = copyCTZoneExternalIDs(a.ExternalIDs) + b.TimeoutPolicy = copyCTZoneTimeoutPolicy(a.TimeoutPolicy) +} + +func (a *CTZone) DeepCopy() *CTZone { + b := new(CTZone) + a.DeepCopyInto(b) + return b +} + +func (a *CTZone) CloneModelInto(b model.Model) { + c := b.(*CTZone) + a.DeepCopyInto(c) +} + +func (a *CTZone) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *CTZone) Equals(b *CTZone) bool { + return a.UUID == b.UUID && + equalCTZoneExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalCTZoneTimeoutPolicy(a.TimeoutPolicy, b.TimeoutPolicy) +} + +func (a *CTZone) EqualsModel(b model.Model) bool { + c := b.(*CTZone) + return a.Equals(c) +} + +var _ model.CloneableModel = &CTZone{} +var _ model.ComparableModel = &CTZone{} diff --git 
a/go-controller/pkg/vswitchd/datapath.go b/go-controller/pkg/vswitchd/datapath.go new file mode 100644 index 0000000000..71a995f93e --- /dev/null +++ b/go-controller/pkg/vswitchd/datapath.go @@ -0,0 +1,153 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const DatapathTable = "Datapath" + +// Datapath defines an object in Datapath table +type Datapath struct { + UUID string `ovsdb:"_uuid"` + Capabilities map[string]string `ovsdb:"capabilities"` + CTZones map[int]string `ovsdb:"ct_zones"` + DatapathVersion string `ovsdb:"datapath_version"` + ExternalIDs map[string]string `ovsdb:"external_ids"` +} + +func (a *Datapath) GetUUID() string { + return a.UUID +} + +func (a *Datapath) GetCapabilities() map[string]string { + return a.Capabilities +} + +func copyDatapathCapabilities(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDatapathCapabilities(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Datapath) GetCTZones() map[int]string { + return a.CTZones +} + +func copyDatapathCTZones(a map[int]string) map[int]string { + if a == nil { + return nil + } + b := make(map[int]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDatapathCTZones(a, b map[int]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Datapath) GetDatapathVersion() string { + return a.DatapathVersion +} + +func (a *Datapath) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyDatapathExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDatapathExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Datapath) DeepCopyInto(b *Datapath) { + *b = *a + b.Capabilities = copyDatapathCapabilities(a.Capabilities) + b.CTZones = copyDatapathCTZones(a.CTZones) + b.ExternalIDs = copyDatapathExternalIDs(a.ExternalIDs) +} + +func (a *Datapath) DeepCopy() *Datapath { + b := new(Datapath) + a.DeepCopyInto(b) + return b +} + +func (a *Datapath) CloneModelInto(b model.Model) { + c := b.(*Datapath) + a.DeepCopyInto(c) +} + +func (a *Datapath) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Datapath) Equals(b *Datapath) bool { + return a.UUID == b.UUID && + equalDatapathCapabilities(a.Capabilities, b.Capabilities) && + equalDatapathCTZones(a.CTZones, b.CTZones) && + a.DatapathVersion == b.DatapathVersion && + equalDatapathExternalIDs(a.ExternalIDs, b.ExternalIDs) +} + +func (a *Datapath) EqualsModel(b model.Model) bool { + c := b.(*Datapath) + return a.Equals(c) +} + +var _ model.CloneableModel = &Datapath{} +var _ model.ComparableModel = &Datapath{} diff --git a/go-controller/pkg/vswitchd/flow_sample_collector_set.go b/go-controller/pkg/vswitchd/flow_sample_collector_set.go new file mode 100644 index 
0000000000..2c90f5d438 --- /dev/null +++ b/go-controller/pkg/vswitchd/flow_sample_collector_set.go @@ -0,0 +1,118 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const FlowSampleCollectorSetTable = "Flow_Sample_Collector_Set" + +// FlowSampleCollectorSet defines an object in Flow_Sample_Collector_Set table +type FlowSampleCollectorSet struct { + UUID string `ovsdb:"_uuid"` + Bridge string `ovsdb:"bridge"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + ID int `ovsdb:"id"` + IPFIX *string `ovsdb:"ipfix"` +} + +func (a *FlowSampleCollectorSet) GetUUID() string { + return a.UUID +} + +func (a *FlowSampleCollectorSet) GetBridge() string { + return a.Bridge +} + +func (a *FlowSampleCollectorSet) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyFlowSampleCollectorSetExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalFlowSampleCollectorSetExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *FlowSampleCollectorSet) GetID() int { + return a.ID +} + +func (a *FlowSampleCollectorSet) GetIPFIX() *string { + return a.IPFIX +} + +func copyFlowSampleCollectorSetIPFIX(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalFlowSampleCollectorSetIPFIX(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *FlowSampleCollectorSet) DeepCopyInto(b *FlowSampleCollectorSet) { + *b = *a + b.ExternalIDs = copyFlowSampleCollectorSetExternalIDs(a.ExternalIDs) + b.IPFIX = copyFlowSampleCollectorSetIPFIX(a.IPFIX) +} + +func (a *FlowSampleCollectorSet) DeepCopy() *FlowSampleCollectorSet { + b := new(FlowSampleCollectorSet) + a.DeepCopyInto(b) + return b +} + +func (a *FlowSampleCollectorSet) CloneModelInto(b model.Model) { + c := b.(*FlowSampleCollectorSet) + a.DeepCopyInto(c) +} + +func (a *FlowSampleCollectorSet) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *FlowSampleCollectorSet) Equals(b *FlowSampleCollectorSet) bool { + return a.UUID == b.UUID && + a.Bridge == b.Bridge && + equalFlowSampleCollectorSetExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.ID == b.ID && + equalFlowSampleCollectorSetIPFIX(a.IPFIX, b.IPFIX) +} + +func (a *FlowSampleCollectorSet) EqualsModel(b model.Model) bool { + c := b.(*FlowSampleCollectorSet) + return a.Equals(c) +} + +var _ model.CloneableModel = &FlowSampleCollectorSet{} +var _ model.ComparableModel = &FlowSampleCollectorSet{} diff --git a/go-controller/pkg/vswitchd/flow_table.go b/go-controller/pkg/vswitchd/flow_table.go new file mode 100644 index 0000000000..42d49d2f58 --- /dev/null +++ b/go-controller/pkg/vswitchd/flow_table.go @@ -0,0 +1,227 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
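
All of the generated copy/equal helpers above follow the same nil-aware convention: a nil map or slice is copied as nil, and nil never compares equal to an empty but non-nil value, so DeepCopy and Equals preserve the distinction between an absent column and an empty one. A minimal sketch of that semantic using the CTZone model from this diff (the go-controller module path is an assumption; adjust it to the repository's actual module path):

package main

import (
	"fmt"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/vswitchd"
)

func main() {
	a := &vswitchd.CTZone{UUID: "z1", ExternalIDs: map[string]string{}}
	b := a.DeepCopy()        // ExternalIDs is cloned into a fresh, equal map
	fmt.Println(a.Equals(b)) // true: same UUID, both maps non-nil and empty

	b.ExternalIDs = nil
	fmt.Println(a.Equals(b)) // false: nil and empty maps are deliberately distinct
}
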
+ +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const FlowTableTable = "Flow_Table" + +type ( + FlowTableOverflowPolicy = string +) + +var ( + FlowTableOverflowPolicyRefuse FlowTableOverflowPolicy = "refuse" + FlowTableOverflowPolicyEvict FlowTableOverflowPolicy = "evict" +) + +// FlowTable defines an object in Flow_Table table +type FlowTable struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + FlowLimit *int `ovsdb:"flow_limit"` + Groups []string `ovsdb:"groups"` + Name *string `ovsdb:"name"` + OverflowPolicy *FlowTableOverflowPolicy `ovsdb:"overflow_policy"` + Prefixes []string `ovsdb:"prefixes"` +} + +func (a *FlowTable) GetUUID() string { + return a.UUID +} + +func (a *FlowTable) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyFlowTableExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalFlowTableExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *FlowTable) GetFlowLimit() *int { + return a.FlowLimit +} + +func copyFlowTableFlowLimit(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalFlowTableFlowLimit(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *FlowTable) GetGroups() []string { + return a.Groups +} + +func copyFlowTableGroups(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalFlowTableGroups(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *FlowTable) GetName() *string { + return a.Name +} + +func copyFlowTableName(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalFlowTableName(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *FlowTable) GetOverflowPolicy() *FlowTableOverflowPolicy { + return a.OverflowPolicy +} + +func copyFlowTableOverflowPolicy(a *FlowTableOverflowPolicy) *FlowTableOverflowPolicy { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalFlowTableOverflowPolicy(a, b *FlowTableOverflowPolicy) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *FlowTable) GetPrefixes() []string { + return a.Prefixes +} + +func copyFlowTablePrefixes(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalFlowTablePrefixes(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *FlowTable) DeepCopyInto(b *FlowTable) { + *b = *a + b.ExternalIDs = copyFlowTableExternalIDs(a.ExternalIDs) + b.FlowLimit = copyFlowTableFlowLimit(a.FlowLimit) + b.Groups = copyFlowTableGroups(a.Groups) + b.Name = copyFlowTableName(a.Name) + b.OverflowPolicy = copyFlowTableOverflowPolicy(a.OverflowPolicy) + 
b.Prefixes = copyFlowTablePrefixes(a.Prefixes) +} + +func (a *FlowTable) DeepCopy() *FlowTable { + b := new(FlowTable) + a.DeepCopyInto(b) + return b +} + +func (a *FlowTable) CloneModelInto(b model.Model) { + c := b.(*FlowTable) + a.DeepCopyInto(c) +} + +func (a *FlowTable) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *FlowTable) Equals(b *FlowTable) bool { + return a.UUID == b.UUID && + equalFlowTableExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalFlowTableFlowLimit(a.FlowLimit, b.FlowLimit) && + equalFlowTableGroups(a.Groups, b.Groups) && + equalFlowTableName(a.Name, b.Name) && + equalFlowTableOverflowPolicy(a.OverflowPolicy, b.OverflowPolicy) && + equalFlowTablePrefixes(a.Prefixes, b.Prefixes) +} + +func (a *FlowTable) EqualsModel(b model.Model) bool { + c := b.(*FlowTable) + return a.Equals(c) +} + +var _ model.CloneableModel = &FlowTable{} +var _ model.ComparableModel = &FlowTable{} diff --git a/go-controller/pkg/vswitchd/gen.go b/go-controller/pkg/vswitchd/gen.go new file mode 100644 index 0000000000..d7c5e300a6 --- /dev/null +++ b/go-controller/pkg/vswitchd/gen.go @@ -0,0 +1,3 @@ +package vswitchd + +//go:generate modelgen --extended -p vswitchd -o . vswitch.ovsschema diff --git a/go-controller/pkg/vswitchd/interface.go b/go-controller/pkg/vswitchd/interface.go new file mode 100644 index 0000000000..e6f67ba9c7 --- /dev/null +++ b/go-controller/pkg/vswitchd/interface.go @@ -0,0 +1,903 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const InterfaceTable = "Interface" + +type ( + InterfaceAdminState = string + InterfaceCFMRemoteOpstate = string + InterfaceDuplex = string + InterfaceLinkState = string +) + +var ( + InterfaceAdminStateUp InterfaceAdminState = "up" + InterfaceAdminStateDown InterfaceAdminState = "down" + InterfaceCFMRemoteOpstateUp InterfaceCFMRemoteOpstate = "up" + InterfaceCFMRemoteOpstateDown InterfaceCFMRemoteOpstate = "down" + InterfaceDuplexHalf InterfaceDuplex = "half" + InterfaceDuplexFull InterfaceDuplex = "full" + InterfaceLinkStateUp InterfaceLinkState = "up" + InterfaceLinkStateDown InterfaceLinkState = "down" +) + +// Interface defines an object in Interface table +type Interface struct { + UUID string `ovsdb:"_uuid"` + AdminState *InterfaceAdminState `ovsdb:"admin_state"` + BFD map[string]string `ovsdb:"bfd"` + BFDStatus map[string]string `ovsdb:"bfd_status"` + CFMFault *bool `ovsdb:"cfm_fault"` + CFMFaultStatus []string `ovsdb:"cfm_fault_status"` + CFMFlapCount *int `ovsdb:"cfm_flap_count"` + CFMHealth *int `ovsdb:"cfm_health"` + CFMMpid *int `ovsdb:"cfm_mpid"` + CFMRemoteMpids []int `ovsdb:"cfm_remote_mpids"` + CFMRemoteOpstate *InterfaceCFMRemoteOpstate `ovsdb:"cfm_remote_opstate"` + Duplex *InterfaceDuplex `ovsdb:"duplex"` + Error *string `ovsdb:"error"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Ifindex *int `ovsdb:"ifindex"` + IngressPolicingBurst int `ovsdb:"ingress_policing_burst"` + IngressPolicingKpktsBurst int `ovsdb:"ingress_policing_kpkts_burst"` + IngressPolicingKpktsRate int `ovsdb:"ingress_policing_kpkts_rate"` + IngressPolicingRate int `ovsdb:"ingress_policing_rate"` + LACPCurrent *bool `ovsdb:"lacp_current"` + LinkResets *int `ovsdb:"link_resets"` + LinkSpeed *int `ovsdb:"link_speed"` + LinkState *InterfaceLinkState `ovsdb:"link_state"` + LLDP map[string]string `ovsdb:"lldp"` + MAC *string `ovsdb:"mac"` + MACInUse *string `ovsdb:"mac_in_use"` + MTU *int `ovsdb:"mtu"` + MTURequest *int `ovsdb:"mtu_request"` + Name string 
`ovsdb:"name"` + Ofport *int `ovsdb:"ofport"` + OfportRequest *int `ovsdb:"ofport_request"` + Options map[string]string `ovsdb:"options"` + OtherConfig map[string]string `ovsdb:"other_config"` + Statistics map[string]int `ovsdb:"statistics"` + Status map[string]string `ovsdb:"status"` + Type string `ovsdb:"type"` +} + +func (a *Interface) GetUUID() string { + return a.UUID +} + +func (a *Interface) GetAdminState() *InterfaceAdminState { + return a.AdminState +} + +func copyInterfaceAdminState(a *InterfaceAdminState) *InterfaceAdminState { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceAdminState(a, b *InterfaceAdminState) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Interface) GetBFD() map[string]string { + return a.BFD +} + +func copyInterfaceBFD(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalInterfaceBFD(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Interface) GetBFDStatus() map[string]string { + return a.BFDStatus +} + +func copyInterfaceBFDStatus(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalInterfaceBFDStatus(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Interface) GetCFMFault() *bool { + return a.CFMFault +} + +func copyInterfaceCFMFault(a *bool) *bool { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceCFMFault(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Interface) GetCFMFaultStatus() []string { + return a.CFMFaultStatus +} + +func copyInterfaceCFMFaultStatus(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalInterfaceCFMFaultStatus(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Interface) GetCFMFlapCount() *int { + return a.CFMFlapCount +} + +func copyInterfaceCFMFlapCount(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceCFMFlapCount(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Interface) GetCFMHealth() *int { + return a.CFMHealth +} + +func copyInterfaceCFMHealth(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceCFMHealth(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Interface) GetCFMMpid() *int { + return a.CFMMpid +} + +func copyInterfaceCFMMpid(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceCFMMpid(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a 
== b { + return true + } + return *a == *b +} + +func (a *Interface) GetCFMRemoteMpids() []int { + return a.CFMRemoteMpids +} + +func copyInterfaceCFMRemoteMpids(a []int) []int { + if a == nil { + return nil + } + b := make([]int, len(a)) + copy(b, a) + return b +} + +func equalInterfaceCFMRemoteMpids(a, b []int) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Interface) GetCFMRemoteOpstate() *InterfaceCFMRemoteOpstate { + return a.CFMRemoteOpstate +} + +func copyInterfaceCFMRemoteOpstate(a *InterfaceCFMRemoteOpstate) *InterfaceCFMRemoteOpstate { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceCFMRemoteOpstate(a, b *InterfaceCFMRemoteOpstate) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Interface) GetDuplex() *InterfaceDuplex { + return a.Duplex +} + +func copyInterfaceDuplex(a *InterfaceDuplex) *InterfaceDuplex { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceDuplex(a, b *InterfaceDuplex) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Interface) GetError() *string { + return a.Error +} + +func copyInterfaceError(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceError(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Interface) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyInterfaceExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalInterfaceExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Interface) GetIfindex() *int { + return a.Ifindex +} + +func copyInterfaceIfindex(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceIfindex(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Interface) GetIngressPolicingBurst() int { + return a.IngressPolicingBurst +} + +func (a *Interface) GetIngressPolicingKpktsBurst() int { + return a.IngressPolicingKpktsBurst +} + +func (a *Interface) GetIngressPolicingKpktsRate() int { + return a.IngressPolicingKpktsRate +} + +func (a *Interface) GetIngressPolicingRate() int { + return a.IngressPolicingRate +} + +func (a *Interface) GetLACPCurrent() *bool { + return a.LACPCurrent +} + +func copyInterfaceLACPCurrent(a *bool) *bool { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceLACPCurrent(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Interface) GetLinkResets() *int { + return a.LinkResets +} + +func copyInterfaceLinkResets(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceLinkResets(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a 
*Interface) GetLinkSpeed() *int { + return a.LinkSpeed +} + +func copyInterfaceLinkSpeed(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceLinkSpeed(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Interface) GetLinkState() *InterfaceLinkState { + return a.LinkState +} + +func copyInterfaceLinkState(a *InterfaceLinkState) *InterfaceLinkState { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceLinkState(a, b *InterfaceLinkState) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Interface) GetLLDP() map[string]string { + return a.LLDP +} + +func copyInterfaceLLDP(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalInterfaceLLDP(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Interface) GetMAC() *string { + return a.MAC +} + +func copyInterfaceMAC(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceMAC(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Interface) GetMACInUse() *string { + return a.MACInUse +} + +func copyInterfaceMACInUse(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceMACInUse(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Interface) GetMTU() *int { + return a.MTU +} + +func copyInterfaceMTU(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceMTU(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Interface) GetMTURequest() *int { + return a.MTURequest +} + +func copyInterfaceMTURequest(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceMTURequest(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Interface) GetName() string { + return a.Name +} + +func (a *Interface) GetOfport() *int { + return a.Ofport +} + +func copyInterfaceOfport(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceOfport(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Interface) GetOfportRequest() *int { + return a.OfportRequest +} + +func copyInterfaceOfportRequest(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalInterfaceOfportRequest(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Interface) GetOptions() map[string]string { + return a.Options +} + +func copyInterfaceOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalInterfaceOptions(a, b map[string]string) bool 
{ + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Interface) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyInterfaceOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalInterfaceOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Interface) GetStatistics() map[string]int { + return a.Statistics +} + +func copyInterfaceStatistics(a map[string]int) map[string]int { + if a == nil { + return nil + } + b := make(map[string]int, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalInterfaceStatistics(a, b map[string]int) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Interface) GetStatus() map[string]string { + return a.Status +} + +func copyInterfaceStatus(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalInterfaceStatus(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Interface) GetType() string { + return a.Type +} + +func (a *Interface) DeepCopyInto(b *Interface) { + *b = *a + b.AdminState = copyInterfaceAdminState(a.AdminState) + b.BFD = copyInterfaceBFD(a.BFD) + b.BFDStatus = copyInterfaceBFDStatus(a.BFDStatus) + b.CFMFault = copyInterfaceCFMFault(a.CFMFault) + b.CFMFaultStatus = copyInterfaceCFMFaultStatus(a.CFMFaultStatus) + b.CFMFlapCount = copyInterfaceCFMFlapCount(a.CFMFlapCount) + b.CFMHealth = copyInterfaceCFMHealth(a.CFMHealth) + b.CFMMpid = copyInterfaceCFMMpid(a.CFMMpid) + b.CFMRemoteMpids = copyInterfaceCFMRemoteMpids(a.CFMRemoteMpids) + b.CFMRemoteOpstate = copyInterfaceCFMRemoteOpstate(a.CFMRemoteOpstate) + b.Duplex = copyInterfaceDuplex(a.Duplex) + b.Error = copyInterfaceError(a.Error) + b.ExternalIDs = copyInterfaceExternalIDs(a.ExternalIDs) + b.Ifindex = copyInterfaceIfindex(a.Ifindex) + b.LACPCurrent = copyInterfaceLACPCurrent(a.LACPCurrent) + b.LinkResets = copyInterfaceLinkResets(a.LinkResets) + b.LinkSpeed = copyInterfaceLinkSpeed(a.LinkSpeed) + b.LinkState = copyInterfaceLinkState(a.LinkState) + b.LLDP = copyInterfaceLLDP(a.LLDP) + b.MAC = copyInterfaceMAC(a.MAC) + b.MACInUse = copyInterfaceMACInUse(a.MACInUse) + b.MTU = copyInterfaceMTU(a.MTU) + b.MTURequest = copyInterfaceMTURequest(a.MTURequest) + b.Ofport = copyInterfaceOfport(a.Ofport) + b.OfportRequest = copyInterfaceOfportRequest(a.OfportRequest) + b.Options = copyInterfaceOptions(a.Options) + b.OtherConfig = copyInterfaceOtherConfig(a.OtherConfig) + b.Statistics = copyInterfaceStatistics(a.Statistics) + b.Status = copyInterfaceStatus(a.Status) +} + +func (a *Interface) DeepCopy() *Interface { + b := new(Interface) + a.DeepCopyInto(b) + return b +} + +func (a *Interface) CloneModelInto(b model.Model) { + c 
:= b.(*Interface) + a.DeepCopyInto(c) +} + +func (a *Interface) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Interface) Equals(b *Interface) bool { + return a.UUID == b.UUID && + equalInterfaceAdminState(a.AdminState, b.AdminState) && + equalInterfaceBFD(a.BFD, b.BFD) && + equalInterfaceBFDStatus(a.BFDStatus, b.BFDStatus) && + equalInterfaceCFMFault(a.CFMFault, b.CFMFault) && + equalInterfaceCFMFaultStatus(a.CFMFaultStatus, b.CFMFaultStatus) && + equalInterfaceCFMFlapCount(a.CFMFlapCount, b.CFMFlapCount) && + equalInterfaceCFMHealth(a.CFMHealth, b.CFMHealth) && + equalInterfaceCFMMpid(a.CFMMpid, b.CFMMpid) && + equalInterfaceCFMRemoteMpids(a.CFMRemoteMpids, b.CFMRemoteMpids) && + equalInterfaceCFMRemoteOpstate(a.CFMRemoteOpstate, b.CFMRemoteOpstate) && + equalInterfaceDuplex(a.Duplex, b.Duplex) && + equalInterfaceError(a.Error, b.Error) && + equalInterfaceExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalInterfaceIfindex(a.Ifindex, b.Ifindex) && + a.IngressPolicingBurst == b.IngressPolicingBurst && + a.IngressPolicingKpktsBurst == b.IngressPolicingKpktsBurst && + a.IngressPolicingKpktsRate == b.IngressPolicingKpktsRate && + a.IngressPolicingRate == b.IngressPolicingRate && + equalInterfaceLACPCurrent(a.LACPCurrent, b.LACPCurrent) && + equalInterfaceLinkResets(a.LinkResets, b.LinkResets) && + equalInterfaceLinkSpeed(a.LinkSpeed, b.LinkSpeed) && + equalInterfaceLinkState(a.LinkState, b.LinkState) && + equalInterfaceLLDP(a.LLDP, b.LLDP) && + equalInterfaceMAC(a.MAC, b.MAC) && + equalInterfaceMACInUse(a.MACInUse, b.MACInUse) && + equalInterfaceMTU(a.MTU, b.MTU) && + equalInterfaceMTURequest(a.MTURequest, b.MTURequest) && + a.Name == b.Name && + equalInterfaceOfport(a.Ofport, b.Ofport) && + equalInterfaceOfportRequest(a.OfportRequest, b.OfportRequest) && + equalInterfaceOptions(a.Options, b.Options) && + equalInterfaceOtherConfig(a.OtherConfig, b.OtherConfig) && + equalInterfaceStatistics(a.Statistics, b.Statistics) && + equalInterfaceStatus(a.Status, b.Status) && + a.Type == b.Type +} + +func (a *Interface) EqualsModel(b model.Model) bool { + c := b.(*Interface) + return a.Equals(c) +} + +var _ model.CloneableModel = &Interface{} +var _ model.ComparableModel = &Interface{} diff --git a/go-controller/pkg/vswitchd/ipfix.go b/go-controller/pkg/vswitchd/ipfix.go new file mode 100644 index 0000000000..72b5d3915c --- /dev/null +++ b/go-controller/pkg/vswitchd/ipfix.go @@ -0,0 +1,270 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
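
The trailing var _ model.CloneableModel = ... and var _ model.ComparableModel = ... lines in each generated file are compile-time assertions that every model implements libovsdb's generic clone and compare interfaces, which is what lets a client cache deep-copy and diff rows without reflection. A minimal sketch of using those interfaces generically, assuming the interface shapes implied by the assertions above and the same module-path assumption as before:

package main

import (
	"fmt"

	"github.com/ovn-org/libovsdb/model"
	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/vswitchd"
)

// changed reports whether two snapshots of a row differ, using the
// generated reflection-free comparison.
func changed(old, cur model.ComparableModel) bool {
	return !old.EqualsModel(cur)
}

func main() {
	a := &vswitchd.Interface{UUID: "i1", Name: "eth0"}
	b := a.CloneModel().(*vswitchd.Interface) // deep copy via the generic interface
	fmt.Println(changed(a, b))                // false: the clone is field-for-field equal

	mtu := 9000
	b.MTURequest = &mtu
	fmt.Println(changed(a, b)) // true: optional (pointer) fields compare by value, nil != 9000
}
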
+ +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const IPFIXTable = "IPFIX" + +// IPFIX defines an object in IPFIX table +type IPFIX struct { + UUID string `ovsdb:"_uuid"` + CacheActiveTimeout *int `ovsdb:"cache_active_timeout"` + CacheMaxFlows *int `ovsdb:"cache_max_flows"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + ObsDomainID *int `ovsdb:"obs_domain_id"` + ObsPointID *int `ovsdb:"obs_point_id"` + OtherConfig map[string]string `ovsdb:"other_config"` + Sampling *int `ovsdb:"sampling"` + Targets []string `ovsdb:"targets"` +} + +func (a *IPFIX) GetUUID() string { + return a.UUID +} + +func (a *IPFIX) GetCacheActiveTimeout() *int { + return a.CacheActiveTimeout +} + +func copyIPFIXCacheActiveTimeout(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIPFIXCacheActiveTimeout(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IPFIX) GetCacheMaxFlows() *int { + return a.CacheMaxFlows +} + +func copyIPFIXCacheMaxFlows(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIPFIXCacheMaxFlows(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IPFIX) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyIPFIXExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalIPFIXExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *IPFIX) GetObsDomainID() *int { + return a.ObsDomainID +} + +func copyIPFIXObsDomainID(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIPFIXObsDomainID(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IPFIX) GetObsPointID() *int { + return a.ObsPointID +} + +func copyIPFIXObsPointID(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIPFIXObsPointID(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IPFIX) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyIPFIXOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalIPFIXOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *IPFIX) GetSampling() *int { + return a.Sampling +} + +func copyIPFIXSampling(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIPFIXSampling(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IPFIX) GetTargets() []string { + return a.Targets +} + +func copyIPFIXTargets(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalIPFIXTargets(a, b 
[]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *IPFIX) DeepCopyInto(b *IPFIX) { + *b = *a + b.CacheActiveTimeout = copyIPFIXCacheActiveTimeout(a.CacheActiveTimeout) + b.CacheMaxFlows = copyIPFIXCacheMaxFlows(a.CacheMaxFlows) + b.ExternalIDs = copyIPFIXExternalIDs(a.ExternalIDs) + b.ObsDomainID = copyIPFIXObsDomainID(a.ObsDomainID) + b.ObsPointID = copyIPFIXObsPointID(a.ObsPointID) + b.OtherConfig = copyIPFIXOtherConfig(a.OtherConfig) + b.Sampling = copyIPFIXSampling(a.Sampling) + b.Targets = copyIPFIXTargets(a.Targets) +} + +func (a *IPFIX) DeepCopy() *IPFIX { + b := new(IPFIX) + a.DeepCopyInto(b) + return b +} + +func (a *IPFIX) CloneModelInto(b model.Model) { + c := b.(*IPFIX) + a.DeepCopyInto(c) +} + +func (a *IPFIX) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *IPFIX) Equals(b *IPFIX) bool { + return a.UUID == b.UUID && + equalIPFIXCacheActiveTimeout(a.CacheActiveTimeout, b.CacheActiveTimeout) && + equalIPFIXCacheMaxFlows(a.CacheMaxFlows, b.CacheMaxFlows) && + equalIPFIXExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalIPFIXObsDomainID(a.ObsDomainID, b.ObsDomainID) && + equalIPFIXObsPointID(a.ObsPointID, b.ObsPointID) && + equalIPFIXOtherConfig(a.OtherConfig, b.OtherConfig) && + equalIPFIXSampling(a.Sampling, b.Sampling) && + equalIPFIXTargets(a.Targets, b.Targets) +} + +func (a *IPFIX) EqualsModel(b model.Model) bool { + c := b.(*IPFIX) + return a.Equals(c) +} + +var _ model.CloneableModel = &IPFIX{} +var _ model.ComparableModel = &IPFIX{} diff --git a/go-controller/pkg/vswitchd/manager.go b/go-controller/pkg/vswitchd/manager.go new file mode 100644 index 0000000000..ff1df96caa --- /dev/null +++ b/go-controller/pkg/vswitchd/manager.go @@ -0,0 +1,243 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
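
In the Manager model that follows, as in the other generated tables, modelgen maps an OVSDB column with min 0 and max 1 to a pointer field, and an enum-constrained string column to a named string alias (ManagerConnectionMode) with one declared value per allowed member. Populating such a column therefore means taking the address of a named value; a short sketch under the same module-path assumption, with an illustrative target address:

package main

import (
	"fmt"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/vswitchd"
)

func main() {
	probe := 5000
	mode := vswitchd.ManagerConnectionModeOutOfBand
	mgr := &vswitchd.Manager{
		UUID:            "m1",
		Target:          "ptcp:6640",
		ConnectionMode:  &mode,  // optional enum column: pointer to a named value
		InactivityProbe: &probe, // optional integer column: nil would mean unset
	}
	fmt.Println(*mgr.ConnectionMode, mgr.Target) // out-of-band ptcp:6640
}
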
+ +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const ManagerTable = "Manager" + +type ( + ManagerConnectionMode = string +) + +var ( + ManagerConnectionModeInBand ManagerConnectionMode = "in-band" + ManagerConnectionModeOutOfBand ManagerConnectionMode = "out-of-band" +) + +// Manager defines an object in Manager table +type Manager struct { + UUID string `ovsdb:"_uuid"` + ConnectionMode *ManagerConnectionMode `ovsdb:"connection_mode"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + InactivityProbe *int `ovsdb:"inactivity_probe"` + IsConnected bool `ovsdb:"is_connected"` + MaxBackoff *int `ovsdb:"max_backoff"` + OtherConfig map[string]string `ovsdb:"other_config"` + Status map[string]string `ovsdb:"status"` + Target string `ovsdb:"target"` +} + +func (a *Manager) GetUUID() string { + return a.UUID +} + +func (a *Manager) GetConnectionMode() *ManagerConnectionMode { + return a.ConnectionMode +} + +func copyManagerConnectionMode(a *ManagerConnectionMode) *ManagerConnectionMode { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalManagerConnectionMode(a, b *ManagerConnectionMode) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Manager) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyManagerExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalManagerExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Manager) GetInactivityProbe() *int { + return a.InactivityProbe +} + +func copyManagerInactivityProbe(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalManagerInactivityProbe(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Manager) GetIsConnected() bool { + return a.IsConnected +} + +func (a *Manager) GetMaxBackoff() *int { + return a.MaxBackoff +} + +func copyManagerMaxBackoff(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalManagerMaxBackoff(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Manager) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyManagerOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalManagerOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Manager) GetStatus() map[string]string { + return a.Status +} + +func copyManagerStatus(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalManagerStatus(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + 
return false + } + } + return true +} + +func (a *Manager) GetTarget() string { + return a.Target +} + +func (a *Manager) DeepCopyInto(b *Manager) { + *b = *a + b.ConnectionMode = copyManagerConnectionMode(a.ConnectionMode) + b.ExternalIDs = copyManagerExternalIDs(a.ExternalIDs) + b.InactivityProbe = copyManagerInactivityProbe(a.InactivityProbe) + b.MaxBackoff = copyManagerMaxBackoff(a.MaxBackoff) + b.OtherConfig = copyManagerOtherConfig(a.OtherConfig) + b.Status = copyManagerStatus(a.Status) +} + +func (a *Manager) DeepCopy() *Manager { + b := new(Manager) + a.DeepCopyInto(b) + return b +} + +func (a *Manager) CloneModelInto(b model.Model) { + c := b.(*Manager) + a.DeepCopyInto(c) +} + +func (a *Manager) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Manager) Equals(b *Manager) bool { + return a.UUID == b.UUID && + equalManagerConnectionMode(a.ConnectionMode, b.ConnectionMode) && + equalManagerExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalManagerInactivityProbe(a.InactivityProbe, b.InactivityProbe) && + a.IsConnected == b.IsConnected && + equalManagerMaxBackoff(a.MaxBackoff, b.MaxBackoff) && + equalManagerOtherConfig(a.OtherConfig, b.OtherConfig) && + equalManagerStatus(a.Status, b.Status) && + a.Target == b.Target +} + +func (a *Manager) EqualsModel(b model.Model) bool { + c := b.(*Manager) + return a.Equals(c) +} + +var _ model.CloneableModel = &Manager{} +var _ model.ComparableModel = &Manager{} diff --git a/go-controller/pkg/vswitchd/mirror.go b/go-controller/pkg/vswitchd/mirror.go new file mode 100644 index 0000000000..044455d253 --- /dev/null +++ b/go-controller/pkg/vswitchd/mirror.go @@ -0,0 +1,294 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const MirrorTable = "Mirror" + +// Mirror defines an object in Mirror table +type Mirror struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Name string `ovsdb:"name"` + OutputPort *string `ovsdb:"output_port"` + OutputVLAN *int `ovsdb:"output_vlan"` + SelectAll bool `ovsdb:"select_all"` + SelectDstPort []string `ovsdb:"select_dst_port"` + SelectSrcPort []string `ovsdb:"select_src_port"` + SelectVLAN []int `ovsdb:"select_vlan"` + Snaplen *int `ovsdb:"snaplen"` + Statistics map[string]int `ovsdb:"statistics"` +} + +func (a *Mirror) GetUUID() string { + return a.UUID +} + +func (a *Mirror) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyMirrorExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalMirrorExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Mirror) GetName() string { + return a.Name +} + +func (a *Mirror) GetOutputPort() *string { + return a.OutputPort +} + +func copyMirrorOutputPort(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalMirrorOutputPort(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Mirror) GetOutputVLAN() *int { + return a.OutputVLAN +} + +func copyMirrorOutputVLAN(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalMirrorOutputVLAN(a, b *int) bool { + 
if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Mirror) GetSelectAll() bool { + return a.SelectAll +} + +func (a *Mirror) GetSelectDstPort() []string { + return a.SelectDstPort +} + +func copyMirrorSelectDstPort(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalMirrorSelectDstPort(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Mirror) GetSelectSrcPort() []string { + return a.SelectSrcPort +} + +func copyMirrorSelectSrcPort(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalMirrorSelectSrcPort(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Mirror) GetSelectVLAN() []int { + return a.SelectVLAN +} + +func copyMirrorSelectVLAN(a []int) []int { + if a == nil { + return nil + } + b := make([]int, len(a)) + copy(b, a) + return b +} + +func equalMirrorSelectVLAN(a, b []int) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Mirror) GetSnaplen() *int { + return a.Snaplen +} + +func copyMirrorSnaplen(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalMirrorSnaplen(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Mirror) GetStatistics() map[string]int { + return a.Statistics +} + +func copyMirrorStatistics(a map[string]int) map[string]int { + if a == nil { + return nil + } + b := make(map[string]int, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalMirrorStatistics(a, b map[string]int) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Mirror) DeepCopyInto(b *Mirror) { + *b = *a + b.ExternalIDs = copyMirrorExternalIDs(a.ExternalIDs) + b.OutputPort = copyMirrorOutputPort(a.OutputPort) + b.OutputVLAN = copyMirrorOutputVLAN(a.OutputVLAN) + b.SelectDstPort = copyMirrorSelectDstPort(a.SelectDstPort) + b.SelectSrcPort = copyMirrorSelectSrcPort(a.SelectSrcPort) + b.SelectVLAN = copyMirrorSelectVLAN(a.SelectVLAN) + b.Snaplen = copyMirrorSnaplen(a.Snaplen) + b.Statistics = copyMirrorStatistics(a.Statistics) +} + +func (a *Mirror) DeepCopy() *Mirror { + b := new(Mirror) + a.DeepCopyInto(b) + return b +} + +func (a *Mirror) CloneModelInto(b model.Model) { + c := b.(*Mirror) + a.DeepCopyInto(c) +} + +func (a *Mirror) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Mirror) Equals(b *Mirror) bool { + return a.UUID == b.UUID && + equalMirrorExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Name == b.Name && + equalMirrorOutputPort(a.OutputPort, b.OutputPort) && + equalMirrorOutputVLAN(a.OutputVLAN, b.OutputVLAN) && + a.SelectAll == b.SelectAll && + equalMirrorSelectDstPort(a.SelectDstPort, b.SelectDstPort) && + equalMirrorSelectSrcPort(a.SelectSrcPort, b.SelectSrcPort) && + equalMirrorSelectVLAN(a.SelectVLAN, b.SelectVLAN) && + 
equalMirrorSnaplen(a.Snaplen, b.Snaplen) && + equalMirrorStatistics(a.Statistics, b.Statistics) +} + +func (a *Mirror) EqualsModel(b model.Model) bool { + c := b.(*Mirror) + return a.Equals(c) +} + +var _ model.CloneableModel = &Mirror{} +var _ model.ComparableModel = &Mirror{} diff --git a/go-controller/pkg/vswitchd/model.go b/go-controller/pkg/vswitchd/model.go new file mode 100644 index 0000000000..c862f04277 --- /dev/null +++ b/go-controller/pkg/vswitchd/model.go @@ -0,0 +1,2012 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package vswitchd + +import ( + "encoding/json" + + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" +) + +// FullDatabaseModel returns the DatabaseModel object to be used in libovsdb +func FullDatabaseModel() (model.ClientDBModel, error) { + return model.NewClientDBModel("Open_vSwitch", map[string]model.Model{ + "AutoAttach": &AutoAttach{}, + "Bridge": &Bridge{}, + "CT_Timeout_Policy": &CTTimeoutPolicy{}, + "CT_Zone": &CTZone{}, + "Controller": &Controller{}, + "Datapath": &Datapath{}, + "Flow_Sample_Collector_Set": &FlowSampleCollectorSet{}, + "Flow_Table": &FlowTable{}, + "IPFIX": &IPFIX{}, + "Interface": &Interface{}, + "Manager": &Manager{}, + "Mirror": &Mirror{}, + "NetFlow": &NetFlow{}, + "Open_vSwitch": &OpenvSwitch{}, + "Port": &Port{}, + "QoS": &QoS{}, + "Queue": &Queue{}, + "SSL": &SSL{}, + "sFlow": &SFlow{}, + }) +} + +var schema = `{ + "name": "Open_vSwitch", + "version": "8.3.0", + "tables": { + "AutoAttach": { + "columns": { + "mappings": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 16777215 + }, + "value": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4095 + }, + "min": 0, + "max": "unlimited" + } + }, + "system_description": { + "type": "string" + }, + "system_name": { + "type": "string" + } + } + }, + "Bridge": { + "columns": { + "auto_attach": { + "type": { + "key": { + "type": "uuid", + "refTable": "AutoAttach" + }, + "min": 0, + "max": 1 + } + }, + "controller": { + "type": { + "key": { + "type": "uuid", + "refTable": "Controller" + }, + "min": 0, + "max": "unlimited" + } + }, + "datapath_id": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + }, + "ephemeral": true + }, + "datapath_type": { + "type": "string" + }, + "datapath_version": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "fail_mode": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "standalone", + "secure" + ] + ] + }, + "min": 0, + "max": 1 + } + }, + "flood_vlans": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4095 + }, + "min": 0, + "max": 4096 + } + }, + "flow_tables": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 254 + }, + "value": { + "type": "uuid", + "refTable": "Flow_Table" + }, + "min": 0, + "max": "unlimited" + } + }, + "ipfix": { + "type": { + "key": { + "type": "uuid", + "refTable": "IPFIX" + }, + "min": 0, + "max": 1 + } + }, + "mcast_snooping_enable": { + "type": "boolean" + }, + "mirrors": { + "type": { + "key": { + "type": "uuid", + "refTable": "Mirror" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string", + "mutable": false + }, + "netflow": { + "type": { + "key": { + "type": "uuid", + "refTable": "NetFlow" + }, + "min": 0, + "max": 1 + } + }, + "other_config": { + "type": { + "key": { + "type": "string" + }, + 
"value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ports": { + "type": { + "key": { + "type": "uuid", + "refTable": "Port" + }, + "min": 0, + "max": "unlimited" + } + }, + "protocols": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "OpenFlow10", + "OpenFlow11", + "OpenFlow12", + "OpenFlow13", + "OpenFlow14", + "OpenFlow15" + ] + ] + }, + "min": 0, + "max": "unlimited" + } + }, + "rstp_enable": { + "type": "boolean" + }, + "rstp_status": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + }, + "ephemeral": true + }, + "sflow": { + "type": { + "key": { + "type": "uuid", + "refTable": "sFlow" + }, + "min": 0, + "max": 1 + } + }, + "status": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + }, + "ephemeral": true + }, + "stp_enable": { + "type": "boolean" + } + }, + "indexes": [ + [ + "name" + ] + ] + }, + "CT_Timeout_Policy": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "timeouts": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "tcp_syn_sent", + "tcp_syn_recv", + "tcp_established", + "tcp_fin_wait", + "tcp_close_wait", + "tcp_last_ack", + "tcp_time_wait", + "tcp_close", + "tcp_syn_sent2", + "tcp_retransmit", + "tcp_unack", + "udp_first", + "udp_single", + "udp_multiple", + "icmp_first", + "icmp_reply" + ] + ] + }, + "value": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4294967295 + }, + "min": 0, + "max": "unlimited" + } + } + } + }, + "CT_Zone": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "timeout_policy": { + "type": { + "key": { + "type": "uuid", + "refTable": "CT_Timeout_Policy" + }, + "min": 0, + "max": 1 + } + } + } + }, + "Controller": { + "columns": { + "connection_mode": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "in-band", + "out-of-band" + ] + ] + }, + "min": 0, + "max": 1 + } + }, + "controller_burst_limit": { + "type": { + "key": { + "type": "integer", + "minInteger": 25 + }, + "min": 0, + "max": 1 + } + }, + "controller_queue_size": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 512 + }, + "min": 0, + "max": 1 + } + }, + "controller_rate_limit": { + "type": { + "key": { + "type": "integer", + "minInteger": 100 + }, + "min": 0, + "max": 1 + } + }, + "enable_async_messages": { + "type": { + "key": { + "type": "boolean" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "inactivity_probe": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "is_connected": { + "type": "boolean", + "ephemeral": true + }, + "local_gateway": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "local_ip": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "local_netmask": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "max_backoff": { + "type": { + "key": { + "type": "integer", + "minInteger": 1000 + }, + "min": 0, + "max": 1 + } + }, + "other_config": { + "type": { + "key": { + "type": "string" + }, + "value": { + 
"type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "role": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "other", + "master", + "slave" + ] + ] + }, + "min": 0, + "max": 1 + }, + "ephemeral": true + }, + "status": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + }, + "ephemeral": true + }, + "target": { + "type": "string" + }, + "type": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "primary", + "service" + ] + ] + }, + "min": 0, + "max": 1 + } + } + } + }, + "Datapath": { + "columns": { + "capabilities": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ct_zones": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 65535 + }, + "value": { + "type": "uuid", + "refTable": "CT_Zone" + }, + "min": 0, + "max": "unlimited" + } + }, + "datapath_version": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + } + }, + "Flow_Sample_Collector_Set": { + "columns": { + "bridge": { + "type": { + "key": { + "type": "uuid", + "refTable": "Bridge" + }, + "min": 1, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "id": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4294967295 + }, + "min": 1, + "max": 1 + } + }, + "ipfix": { + "type": { + "key": { + "type": "uuid", + "refTable": "IPFIX" + }, + "min": 0, + "max": 1 + } + } + }, + "indexes": [ + [ + "id", + "bridge" + ] + ], + "isRoot": true + }, + "Flow_Table": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "flow_limit": { + "type": { + "key": { + "type": "integer", + "minInteger": 0 + }, + "min": 0, + "max": 1 + } + }, + "groups": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "overflow_policy": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "refuse", + "evict" + ] + ] + }, + "min": 0, + "max": 1 + } + }, + "prefixes": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 3 + } + } + } + }, + "IPFIX": { + "columns": { + "cache_active_timeout": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4200 + }, + "min": 0, + "max": 1 + } + }, + "cache_max_flows": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4294967295 + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "obs_domain_id": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4294967295 + }, + "min": 0, + "max": 1 + } + }, + "obs_point_id": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4294967295 + }, + "min": 0, + "max": 1 + } + }, + "other_config": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "sampling": { + "type": { + "key": { + "type": "integer", + 
"minInteger": 1, + "maxInteger": 4294967295 + }, + "min": 0, + "max": 1 + } + }, + "targets": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + } + }, + "Interface": { + "columns": { + "admin_state": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "up", + "down" + ] + ] + }, + "min": 0, + "max": 1 + }, + "ephemeral": true + }, + "bfd": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "bfd_status": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "cfm_fault": { + "type": { + "key": { + "type": "boolean" + }, + "min": 0, + "max": 1 + }, + "ephemeral": true + }, + "cfm_fault_status": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + }, + "ephemeral": true + }, + "cfm_flap_count": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "cfm_health": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 100 + }, + "min": 0, + "max": 1 + }, + "ephemeral": true + }, + "cfm_mpid": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "cfm_remote_mpids": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": "unlimited" + }, + "ephemeral": true + }, + "cfm_remote_opstate": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "up", + "down" + ] + ] + }, + "min": 0, + "max": 1 + }, + "ephemeral": true + }, + "duplex": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "half", + "full" + ] + ] + }, + "min": 0, + "max": 1 + }, + "ephemeral": true + }, + "error": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ifindex": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4294967295 + }, + "min": 0, + "max": 1 + }, + "ephemeral": true + }, + "ingress_policing_burst": { + "type": { + "key": { + "type": "integer", + "minInteger": 0 + } + } + }, + "ingress_policing_kpkts_burst": { + "type": { + "key": { + "type": "integer", + "minInteger": 0 + } + } + }, + "ingress_policing_kpkts_rate": { + "type": { + "key": { + "type": "integer", + "minInteger": 0 + } + } + }, + "ingress_policing_rate": { + "type": { + "key": { + "type": "integer", + "minInteger": 0 + } + } + }, + "lacp_current": { + "type": { + "key": { + "type": "boolean" + }, + "min": 0, + "max": 1 + }, + "ephemeral": true + }, + "link_resets": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + }, + "ephemeral": true + }, + "link_speed": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + }, + "ephemeral": true + }, + "link_state": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "up", + "down" + ] + ] + }, + "min": 0, + "max": 1 + }, + "ephemeral": true + }, + "lldp": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "mac": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "mac_in_use": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + }, + "ephemeral": true + }, + "mtu": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + 
}, + "ephemeral": true + }, + "mtu_request": { + "type": { + "key": { + "type": "integer", + "minInteger": 1 + }, + "min": 0, + "max": 1 + } + }, + "name": { + "type": "string", + "mutable": false + }, + "ofport": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "ofport_request": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 65279 + }, + "min": 0, + "max": 1 + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "other_config": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "statistics": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "integer" + }, + "min": 0, + "max": "unlimited" + }, + "ephemeral": true + }, + "status": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + }, + "ephemeral": true + }, + "type": { + "type": "string" + } + }, + "indexes": [ + [ + "name" + ] + ] + }, + "Manager": { + "columns": { + "connection_mode": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "in-band", + "out-of-band" + ] + ] + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "inactivity_probe": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "is_connected": { + "type": "boolean", + "ephemeral": true + }, + "max_backoff": { + "type": { + "key": { + "type": "integer", + "minInteger": 1000 + }, + "min": 0, + "max": 1 + } + }, + "other_config": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "status": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + }, + "ephemeral": true + }, + "target": { + "type": "string" + } + }, + "indexes": [ + [ + "target" + ] + ] + }, + "Mirror": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "output_port": { + "type": { + "key": { + "type": "uuid", + "refTable": "Port", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "output_vlan": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 4095 + }, + "min": 0, + "max": 1 + } + }, + "select_all": { + "type": "boolean" + }, + "select_dst_port": { + "type": { + "key": { + "type": "uuid", + "refTable": "Port", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "select_src_port": { + "type": { + "key": { + "type": "uuid", + "refTable": "Port", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "select_vlan": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4095 + }, + "min": 0, + "max": 4096 + } + }, + "snaplen": { + "type": { + "key": { + "type": "integer", + "minInteger": 14, + "maxInteger": 65535 + }, + "min": 0, + "max": 1 + } + }, + "statistics": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "integer" + }, + "min": 0, + "max": "unlimited" + }, + "ephemeral": true + } + } + }, + "NetFlow": { + "columns": { + "active_timeout": { + "type": { + "key": { + "type": "integer", + 
"minInteger": -1 + } + } + }, + "add_id_to_interface": { + "type": "boolean" + }, + "engine_id": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 255 + }, + "min": 0, + "max": 1 + } + }, + "engine_type": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 255 + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "targets": { + "type": { + "key": { + "type": "string" + }, + "min": 1, + "max": "unlimited" + } + } + } + }, + "Open_vSwitch": { + "columns": { + "bridges": { + "type": { + "key": { + "type": "uuid", + "refTable": "Bridge" + }, + "min": 0, + "max": "unlimited" + } + }, + "cur_cfg": { + "type": "integer" + }, + "datapath_types": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "datapaths": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "uuid", + "refTable": "Datapath" + }, + "min": 0, + "max": "unlimited" + } + }, + "db_version": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "dpdk_initialized": { + "type": "boolean" + }, + "dpdk_version": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "iface_types": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "manager_options": { + "type": { + "key": { + "type": "uuid", + "refTable": "Manager" + }, + "min": 0, + "max": "unlimited" + } + }, + "next_cfg": { + "type": "integer" + }, + "other_config": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ovs_version": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "ssl": { + "type": { + "key": { + "type": "uuid", + "refTable": "SSL" + }, + "min": 0, + "max": 1 + } + }, + "statistics": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + }, + "ephemeral": true + }, + "system_type": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "system_version": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + } + }, + "isRoot": true + }, + "Port": { + "columns": { + "bond_active_slave": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "bond_downdelay": { + "type": "integer" + }, + "bond_fake_iface": { + "type": "boolean" + }, + "bond_mode": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "balance-tcp", + "balance-slb", + "active-backup" + ] + ] + }, + "min": 0, + "max": 1 + } + }, + "bond_updelay": { + "type": "integer" + }, + "cvlans": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4095 + }, + "min": 0, + "max": 4096 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "fake_bridge": { + "type": "boolean" + }, + "interfaces": { + "type": { + "key": { + "type": "uuid", + "refTable": "Interface" + }, + "min": 1, + "max": "unlimited" + } + }, + "lacp": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "active", + "passive", + "off" + ] + ] + }, + 
"min": 0, + "max": 1 + } + }, + "mac": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "name": { + "type": "string", + "mutable": false + }, + "other_config": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "protected": { + "type": "boolean" + }, + "qos": { + "type": { + "key": { + "type": "uuid", + "refTable": "QoS" + }, + "min": 0, + "max": 1 + } + }, + "rstp_statistics": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "integer" + }, + "min": 0, + "max": "unlimited" + }, + "ephemeral": true + }, + "rstp_status": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + }, + "ephemeral": true + }, + "statistics": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "integer" + }, + "min": 0, + "max": "unlimited" + }, + "ephemeral": true + }, + "status": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + }, + "ephemeral": true + }, + "tag": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4095 + }, + "min": 0, + "max": 1 + } + }, + "trunks": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4095 + }, + "min": 0, + "max": 4096 + } + }, + "vlan_mode": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "trunk", + "access", + "native-tagged", + "native-untagged", + "dot1q-tunnel" + ] + ] + }, + "min": 0, + "max": 1 + } + } + }, + "indexes": [ + [ + "name" + ] + ] + }, + "QoS": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "other_config": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "queues": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4294967295 + }, + "value": { + "type": "uuid", + "refTable": "Queue" + }, + "min": 0, + "max": "unlimited" + } + }, + "type": { + "type": "string" + } + }, + "isRoot": true + }, + "Queue": { + "columns": { + "dscp": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 63 + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "other_config": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "SSL": { + "columns": { + "bootstrap_ca_cert": { + "type": "boolean" + }, + "ca_cert": { + "type": "string" + }, + "certificate": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "private_key": { + "type": "string" + } + } + }, + "sFlow": { + "columns": { + "agent": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "header": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "polling": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "sampling": { 
+ "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "targets": { + "type": { + "key": { + "type": "string" + }, + "min": 1, + "max": "unlimited" + } + } + } + } + } +}` + +func Schema() ovsdb.DatabaseSchema { + var s ovsdb.DatabaseSchema + err := json.Unmarshal([]byte(schema), &s) + if err != nil { + panic(err) + } + return s +} diff --git a/go-controller/pkg/vswitchd/netflow.go b/go-controller/pkg/vswitchd/netflow.go new file mode 100644 index 0000000000..f958587044 --- /dev/null +++ b/go-controller/pkg/vswitchd/netflow.go @@ -0,0 +1,174 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const NetFlowTable = "NetFlow" + +// NetFlow defines an object in NetFlow table +type NetFlow struct { + UUID string `ovsdb:"_uuid"` + ActiveTimeout int `ovsdb:"active_timeout"` + AddIDToInterface bool `ovsdb:"add_id_to_interface"` + EngineID *int `ovsdb:"engine_id"` + EngineType *int `ovsdb:"engine_type"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Targets []string `ovsdb:"targets"` +} + +func (a *NetFlow) GetUUID() string { + return a.UUID +} + +func (a *NetFlow) GetActiveTimeout() int { + return a.ActiveTimeout +} + +func (a *NetFlow) GetAddIDToInterface() bool { + return a.AddIDToInterface +} + +func (a *NetFlow) GetEngineID() *int { + return a.EngineID +} + +func copyNetFlowEngineID(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalNetFlowEngineID(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *NetFlow) GetEngineType() *int { + return a.EngineType +} + +func copyNetFlowEngineType(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalNetFlowEngineType(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *NetFlow) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyNetFlowExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalNetFlowExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *NetFlow) GetTargets() []string { + return a.Targets +} + +func copyNetFlowTargets(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalNetFlowTargets(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *NetFlow) DeepCopyInto(b *NetFlow) { + *b = *a + b.EngineID = copyNetFlowEngineID(a.EngineID) + b.EngineType = copyNetFlowEngineType(a.EngineType) + b.ExternalIDs = copyNetFlowExternalIDs(a.ExternalIDs) + b.Targets = copyNetFlowTargets(a.Targets) +} + +func (a *NetFlow) DeepCopy() *NetFlow { + b := new(NetFlow) + a.DeepCopyInto(b) + return b +} + +func (a *NetFlow) CloneModelInto(b model.Model) { + c := b.(*NetFlow) + a.DeepCopyInto(c) +} + +func (a *NetFlow) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *NetFlow) Equals(b *NetFlow) bool { + return a.UUID == b.UUID && + 
a.ActiveTimeout == b.ActiveTimeout && + a.AddIDToInterface == b.AddIDToInterface && + equalNetFlowEngineID(a.EngineID, b.EngineID) && + equalNetFlowEngineType(a.EngineType, b.EngineType) && + equalNetFlowExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalNetFlowTargets(a.Targets, b.Targets) +} + +func (a *NetFlow) EqualsModel(b model.Model) bool { + c := b.(*NetFlow) + return a.Equals(c) +} + +var _ model.CloneableModel = &NetFlow{} +var _ model.ComparableModel = &NetFlow{} diff --git a/go-controller/pkg/vswitchd/open_vswitch.go b/go-controller/pkg/vswitchd/open_vswitch.go new file mode 100644 index 0000000000..e8ea481d5b --- /dev/null +++ b/go-controller/pkg/vswitchd/open_vswitch.go @@ -0,0 +1,472 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const OpenvSwitchTable = "Open_vSwitch" + +// OpenvSwitch defines an object in Open_vSwitch table +type OpenvSwitch struct { + UUID string `ovsdb:"_uuid"` + Bridges []string `ovsdb:"bridges"` + CurCfg int `ovsdb:"cur_cfg"` + DatapathTypes []string `ovsdb:"datapath_types"` + Datapaths map[string]string `ovsdb:"datapaths"` + DbVersion *string `ovsdb:"db_version"` + DpdkInitialized bool `ovsdb:"dpdk_initialized"` + DpdkVersion *string `ovsdb:"dpdk_version"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + IfaceTypes []string `ovsdb:"iface_types"` + ManagerOptions []string `ovsdb:"manager_options"` + NextCfg int `ovsdb:"next_cfg"` + OtherConfig map[string]string `ovsdb:"other_config"` + OVSVersion *string `ovsdb:"ovs_version"` + SSL *string `ovsdb:"ssl"` + Statistics map[string]string `ovsdb:"statistics"` + SystemType *string `ovsdb:"system_type"` + SystemVersion *string `ovsdb:"system_version"` +} + +func (a *OpenvSwitch) GetUUID() string { + return a.UUID +} + +func (a *OpenvSwitch) GetBridges() []string { + return a.Bridges +} + +func copyOpenvSwitchBridges(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalOpenvSwitchBridges(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *OpenvSwitch) GetCurCfg() int { + return a.CurCfg +} + +func (a *OpenvSwitch) GetDatapathTypes() []string { + return a.DatapathTypes +} + +func copyOpenvSwitchDatapathTypes(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalOpenvSwitchDatapathTypes(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *OpenvSwitch) GetDatapaths() map[string]string { + return a.Datapaths +} + +func copyOpenvSwitchDatapaths(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalOpenvSwitchDatapaths(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *OpenvSwitch) GetDbVersion() *string { + return a.DbVersion +} + +func copyOpenvSwitchDbVersion(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func 
equalOpenvSwitchDbVersion(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *OpenvSwitch) GetDpdkInitialized() bool { + return a.DpdkInitialized +} + +func (a *OpenvSwitch) GetDpdkVersion() *string { + return a.DpdkVersion +} + +func copyOpenvSwitchDpdkVersion(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalOpenvSwitchDpdkVersion(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *OpenvSwitch) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyOpenvSwitchExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalOpenvSwitchExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *OpenvSwitch) GetIfaceTypes() []string { + return a.IfaceTypes +} + +func copyOpenvSwitchIfaceTypes(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalOpenvSwitchIfaceTypes(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *OpenvSwitch) GetManagerOptions() []string { + return a.ManagerOptions +} + +func copyOpenvSwitchManagerOptions(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalOpenvSwitchManagerOptions(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *OpenvSwitch) GetNextCfg() int { + return a.NextCfg +} + +func (a *OpenvSwitch) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyOpenvSwitchOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalOpenvSwitchOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *OpenvSwitch) GetOVSVersion() *string { + return a.OVSVersion +} + +func copyOpenvSwitchOVSVersion(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalOpenvSwitchOVSVersion(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *OpenvSwitch) GetSSL() *string { + return a.SSL +} + +func copyOpenvSwitchSSL(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalOpenvSwitchSSL(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *OpenvSwitch) GetStatistics() map[string]string { + return a.Statistics +} + +func copyOpenvSwitchStatistics(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := 
make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalOpenvSwitchStatistics(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *OpenvSwitch) GetSystemType() *string { + return a.SystemType +} + +func copyOpenvSwitchSystemType(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalOpenvSwitchSystemType(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *OpenvSwitch) GetSystemVersion() *string { + return a.SystemVersion +} + +func copyOpenvSwitchSystemVersion(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalOpenvSwitchSystemVersion(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *OpenvSwitch) DeepCopyInto(b *OpenvSwitch) { + *b = *a + b.Bridges = copyOpenvSwitchBridges(a.Bridges) + b.DatapathTypes = copyOpenvSwitchDatapathTypes(a.DatapathTypes) + b.Datapaths = copyOpenvSwitchDatapaths(a.Datapaths) + b.DbVersion = copyOpenvSwitchDbVersion(a.DbVersion) + b.DpdkVersion = copyOpenvSwitchDpdkVersion(a.DpdkVersion) + b.ExternalIDs = copyOpenvSwitchExternalIDs(a.ExternalIDs) + b.IfaceTypes = copyOpenvSwitchIfaceTypes(a.IfaceTypes) + b.ManagerOptions = copyOpenvSwitchManagerOptions(a.ManagerOptions) + b.OtherConfig = copyOpenvSwitchOtherConfig(a.OtherConfig) + b.OVSVersion = copyOpenvSwitchOVSVersion(a.OVSVersion) + b.SSL = copyOpenvSwitchSSL(a.SSL) + b.Statistics = copyOpenvSwitchStatistics(a.Statistics) + b.SystemType = copyOpenvSwitchSystemType(a.SystemType) + b.SystemVersion = copyOpenvSwitchSystemVersion(a.SystemVersion) +} + +func (a *OpenvSwitch) DeepCopy() *OpenvSwitch { + b := new(OpenvSwitch) + a.DeepCopyInto(b) + return b +} + +func (a *OpenvSwitch) CloneModelInto(b model.Model) { + c := b.(*OpenvSwitch) + a.DeepCopyInto(c) +} + +func (a *OpenvSwitch) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *OpenvSwitch) Equals(b *OpenvSwitch) bool { + return a.UUID == b.UUID && + equalOpenvSwitchBridges(a.Bridges, b.Bridges) && + a.CurCfg == b.CurCfg && + equalOpenvSwitchDatapathTypes(a.DatapathTypes, b.DatapathTypes) && + equalOpenvSwitchDatapaths(a.Datapaths, b.Datapaths) && + equalOpenvSwitchDbVersion(a.DbVersion, b.DbVersion) && + a.DpdkInitialized == b.DpdkInitialized && + equalOpenvSwitchDpdkVersion(a.DpdkVersion, b.DpdkVersion) && + equalOpenvSwitchExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalOpenvSwitchIfaceTypes(a.IfaceTypes, b.IfaceTypes) && + equalOpenvSwitchManagerOptions(a.ManagerOptions, b.ManagerOptions) && + a.NextCfg == b.NextCfg && + equalOpenvSwitchOtherConfig(a.OtherConfig, b.OtherConfig) && + equalOpenvSwitchOVSVersion(a.OVSVersion, b.OVSVersion) && + equalOpenvSwitchSSL(a.SSL, b.SSL) && + equalOpenvSwitchStatistics(a.Statistics, b.Statistics) && + equalOpenvSwitchSystemType(a.SystemType, b.SystemType) && + equalOpenvSwitchSystemVersion(a.SystemVersion, b.SystemVersion) +} + +func (a *OpenvSwitch) EqualsModel(b model.Model) bool { + c := b.(*OpenvSwitch) + return a.Equals(c) +} + +var _ model.CloneableModel = &OpenvSwitch{} +var _ model.ComparableModel = &OpenvSwitch{} diff --git a/go-controller/pkg/vswitchd/port.go b/go-controller/pkg/vswitchd/port.go new file mode 100644 index 
0000000000..cf0ba96153 --- /dev/null +++ b/go-controller/pkg/vswitchd/port.go @@ -0,0 +1,570 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const PortTable = "Port" + +type ( + PortBondMode = string + PortLACP = string + PortVLANMode = string +) + +var ( + PortBondModeBalanceTCP PortBondMode = "balance-tcp" + PortBondModeBalanceSLB PortBondMode = "balance-slb" + PortBondModeActiveBackup PortBondMode = "active-backup" + PortLACPActive PortLACP = "active" + PortLACPPassive PortLACP = "passive" + PortLACPOff PortLACP = "off" + PortVLANModeTrunk PortVLANMode = "trunk" + PortVLANModeAccess PortVLANMode = "access" + PortVLANModeNativeTagged PortVLANMode = "native-tagged" + PortVLANModeNativeUntagged PortVLANMode = "native-untagged" + PortVLANModeDot1qTunnel PortVLANMode = "dot1q-tunnel" +) + +// Port defines an object in Port table +type Port struct { + UUID string `ovsdb:"_uuid"` + BondActiveSlave *string `ovsdb:"bond_active_slave"` + BondDowndelay int `ovsdb:"bond_downdelay"` + BondFakeIface bool `ovsdb:"bond_fake_iface"` + BondMode *PortBondMode `ovsdb:"bond_mode"` + BondUpdelay int `ovsdb:"bond_updelay"` + CVLANs []int `ovsdb:"cvlans"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + FakeBridge bool `ovsdb:"fake_bridge"` + Interfaces []string `ovsdb:"interfaces"` + LACP *PortLACP `ovsdb:"lacp"` + MAC *string `ovsdb:"mac"` + Name string `ovsdb:"name"` + OtherConfig map[string]string `ovsdb:"other_config"` + Protected bool `ovsdb:"protected"` + QOS *string `ovsdb:"qos"` + RSTPStatistics map[string]int `ovsdb:"rstp_statistics"` + RSTPStatus map[string]string `ovsdb:"rstp_status"` + Statistics map[string]int `ovsdb:"statistics"` + Status map[string]string `ovsdb:"status"` + Tag *int `ovsdb:"tag"` + Trunks []int `ovsdb:"trunks"` + VLANMode *PortVLANMode `ovsdb:"vlan_mode"` +} + +func (a *Port) GetUUID() string { + return a.UUID +} + +func (a *Port) GetBondActiveSlave() *string { + return a.BondActiveSlave +} + +func copyPortBondActiveSlave(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortBondActiveSlave(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Port) GetBondDowndelay() int { + return a.BondDowndelay +} + +func (a *Port) GetBondFakeIface() bool { + return a.BondFakeIface +} + +func (a *Port) GetBondMode() *PortBondMode { + return a.BondMode +} + +func copyPortBondMode(a *PortBondMode) *PortBondMode { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortBondMode(a, b *PortBondMode) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Port) GetBondUpdelay() int { + return a.BondUpdelay +} + +func (a *Port) GetCVLANs() []int { + return a.CVLANs +} + +func copyPortCVLANs(a []int) []int { + if a == nil { + return nil + } + b := make([]int, len(a)) + copy(b, a) + return b +} + +func equalPortCVLANs(a, b []int) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Port) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyPortExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func 
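// --- Illustrative aside, not part of the generated patch ---
// Optional enum columns such as vlan_mode are generated as pointers to
// string aliases (*PortVLANMode), so a caller takes the address of a copy
// of one of the generated constants. A sketch, assuming package vswitchd
// scope; the helper name is hypothetical:

func demoAccessPort(name string, tag int) *Port {
	mode := PortVLANModeAccess // the generated "access" constant
	return &Port{
		Name:     name,
		Tag:      &tag,  // optional integer column: *int
		VLANMode: &mode, // optional enum column: *PortVLANMode
	}
}

// --- end aside ---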
equalPortExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Port) GetFakeBridge() bool { + return a.FakeBridge +} + +func (a *Port) GetInterfaces() []string { + return a.Interfaces +} + +func copyPortInterfaces(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortInterfaces(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Port) GetLACP() *PortLACP { + return a.LACP +} + +func copyPortLACP(a *PortLACP) *PortLACP { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortLACP(a, b *PortLACP) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Port) GetMAC() *string { + return a.MAC +} + +func copyPortMAC(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortMAC(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Port) GetName() string { + return a.Name +} + +func (a *Port) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyPortOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalPortOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Port) GetProtected() bool { + return a.Protected +} + +func (a *Port) GetQOS() *string { + return a.QOS +} + +func copyPortQOS(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortQOS(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Port) GetRSTPStatistics() map[string]int { + return a.RSTPStatistics +} + +func copyPortRSTPStatistics(a map[string]int) map[string]int { + if a == nil { + return nil + } + b := make(map[string]int, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalPortRSTPStatistics(a, b map[string]int) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Port) GetRSTPStatus() map[string]string { + return a.RSTPStatus +} + +func copyPortRSTPStatus(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalPortRSTPStatus(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Port) GetStatistics() map[string]int { + return a.Statistics +} + +func copyPortStatistics(a map[string]int) map[string]int { + if a == nil { + return nil + } + b := 
make(map[string]int, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalPortStatistics(a, b map[string]int) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Port) GetStatus() map[string]string { + return a.Status +} + +func copyPortStatus(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalPortStatus(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Port) GetTag() *int { + return a.Tag +} + +func copyPortTag(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortTag(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Port) GetTrunks() []int { + return a.Trunks +} + +func copyPortTrunks(a []int) []int { + if a == nil { + return nil + } + b := make([]int, len(a)) + copy(b, a) + return b +} + +func equalPortTrunks(a, b []int) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Port) GetVLANMode() *PortVLANMode { + return a.VLANMode +} + +func copyPortVLANMode(a *PortVLANMode) *PortVLANMode { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortVLANMode(a, b *PortVLANMode) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Port) DeepCopyInto(b *Port) { + *b = *a + b.BondActiveSlave = copyPortBondActiveSlave(a.BondActiveSlave) + b.BondMode = copyPortBondMode(a.BondMode) + b.CVLANs = copyPortCVLANs(a.CVLANs) + b.ExternalIDs = copyPortExternalIDs(a.ExternalIDs) + b.Interfaces = copyPortInterfaces(a.Interfaces) + b.LACP = copyPortLACP(a.LACP) + b.MAC = copyPortMAC(a.MAC) + b.OtherConfig = copyPortOtherConfig(a.OtherConfig) + b.QOS = copyPortQOS(a.QOS) + b.RSTPStatistics = copyPortRSTPStatistics(a.RSTPStatistics) + b.RSTPStatus = copyPortRSTPStatus(a.RSTPStatus) + b.Statistics = copyPortStatistics(a.Statistics) + b.Status = copyPortStatus(a.Status) + b.Tag = copyPortTag(a.Tag) + b.Trunks = copyPortTrunks(a.Trunks) + b.VLANMode = copyPortVLANMode(a.VLANMode) +} + +func (a *Port) DeepCopy() *Port { + b := new(Port) + a.DeepCopyInto(b) + return b +} + +func (a *Port) CloneModelInto(b model.Model) { + c := b.(*Port) + a.DeepCopyInto(c) +} + +func (a *Port) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Port) Equals(b *Port) bool { + return a.UUID == b.UUID && + equalPortBondActiveSlave(a.BondActiveSlave, b.BondActiveSlave) && + a.BondDowndelay == b.BondDowndelay && + a.BondFakeIface == b.BondFakeIface && + equalPortBondMode(a.BondMode, b.BondMode) && + a.BondUpdelay == b.BondUpdelay && + equalPortCVLANs(a.CVLANs, b.CVLANs) && + equalPortExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.FakeBridge == b.FakeBridge && + equalPortInterfaces(a.Interfaces, b.Interfaces) && + equalPortLACP(a.LACP, b.LACP) && + equalPortMAC(a.MAC, b.MAC) && + a.Name == b.Name && + equalPortOtherConfig(a.OtherConfig, b.OtherConfig) && + a.Protected == b.Protected && + 
equalPortQOS(a.QOS, b.QOS) && + equalPortRSTPStatistics(a.RSTPStatistics, b.RSTPStatistics) && + equalPortRSTPStatus(a.RSTPStatus, b.RSTPStatus) && + equalPortStatistics(a.Statistics, b.Statistics) && + equalPortStatus(a.Status, b.Status) && + equalPortTag(a.Tag, b.Tag) && + equalPortTrunks(a.Trunks, b.Trunks) && + equalPortVLANMode(a.VLANMode, b.VLANMode) +} + +func (a *Port) EqualsModel(b model.Model) bool { + c := b.(*Port) + return a.Equals(c) +} + +var _ model.CloneableModel = &Port{} +var _ model.ComparableModel = &Port{} diff --git a/go-controller/pkg/vswitchd/qos.go b/go-controller/pkg/vswitchd/qos.go new file mode 100644 index 0000000000..aa1c9dd004 --- /dev/null +++ b/go-controller/pkg/vswitchd/qos.go @@ -0,0 +1,153 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const QoSTable = "QoS" + +// QoS defines an object in QoS table +type QoS struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + OtherConfig map[string]string `ovsdb:"other_config"` + Queues map[int]string `ovsdb:"queues"` + Type string `ovsdb:"type"` +} + +func (a *QoS) GetUUID() string { + return a.UUID +} + +func (a *QoS) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyQoSExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalQoSExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *QoS) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyQoSOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalQoSOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *QoS) GetQueues() map[int]string { + return a.Queues +} + +func copyQoSQueues(a map[int]string) map[int]string { + if a == nil { + return nil + } + b := make(map[int]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalQoSQueues(a, b map[int]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *QoS) GetType() string { + return a.Type +} + +func (a *QoS) DeepCopyInto(b *QoS) { + *b = *a + b.ExternalIDs = copyQoSExternalIDs(a.ExternalIDs) + b.OtherConfig = copyQoSOtherConfig(a.OtherConfig) + b.Queues = copyQoSQueues(a.Queues) +} + +func (a *QoS) DeepCopy() *QoS { + b := new(QoS) + a.DeepCopyInto(b) + return b +} + +func (a *QoS) CloneModelInto(b model.Model) { + c := b.(*QoS) + a.DeepCopyInto(c) +} + +func (a *QoS) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *QoS) Equals(b *QoS) bool { + return a.UUID == b.UUID && + equalQoSExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalQoSOtherConfig(a.OtherConfig, b.OtherConfig) && + equalQoSQueues(a.Queues, b.Queues) && + a.Type == b.Type +} + +func (a *QoS) EqualsModel(b model.Model) bool { + c := 
b.(*QoS) + return a.Equals(c) +} + +var _ model.CloneableModel = &QoS{} +var _ model.ComparableModel = &QoS{} diff --git a/go-controller/pkg/vswitchd/queue.go b/go-controller/pkg/vswitchd/queue.go new file mode 100644 index 0000000000..e8615e9cf7 --- /dev/null +++ b/go-controller/pkg/vswitchd/queue.go @@ -0,0 +1,139 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const QueueTable = "Queue" + +// Queue defines an object in Queue table +type Queue struct { + UUID string `ovsdb:"_uuid"` + DSCP *int `ovsdb:"dscp"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + OtherConfig map[string]string `ovsdb:"other_config"` +} + +func (a *Queue) GetUUID() string { + return a.UUID +} + +func (a *Queue) GetDSCP() *int { + return a.DSCP +} + +func copyQueueDSCP(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalQueueDSCP(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Queue) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyQueueExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalQueueExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Queue) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyQueueOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalQueueOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Queue) DeepCopyInto(b *Queue) { + *b = *a + b.DSCP = copyQueueDSCP(a.DSCP) + b.ExternalIDs = copyQueueExternalIDs(a.ExternalIDs) + b.OtherConfig = copyQueueOtherConfig(a.OtherConfig) +} + +func (a *Queue) DeepCopy() *Queue { + b := new(Queue) + a.DeepCopyInto(b) + return b +} + +func (a *Queue) CloneModelInto(b model.Model) { + c := b.(*Queue) + a.DeepCopyInto(c) +} + +func (a *Queue) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Queue) Equals(b *Queue) bool { + return a.UUID == b.UUID && + equalQueueDSCP(a.DSCP, b.DSCP) && + equalQueueExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalQueueOtherConfig(a.OtherConfig, b.OtherConfig) +} + +func (a *Queue) EqualsModel(b model.Model) bool { + c := b.(*Queue) + return a.Equals(c) +} + +var _ model.CloneableModel = &Queue{} +var _ model.ComparableModel = &Queue{} diff --git a/go-controller/pkg/vswitchd/sflow.go b/go-controller/pkg/vswitchd/sflow.go new file mode 100644 index 0000000000..fcbcc8569e --- /dev/null +++ b/go-controller/pkg/vswitchd/sflow.go @@ -0,0 +1,212 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
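// --- Illustrative aside, not part of the generated patch ---
// Every generated model shares one contract: DeepCopy yields a fully
// independent value (pointer and map fields are duplicated, as in
// copyQueueDSCP and copyQueueExternalIDs above), while Equals compares by
// content rather than by pointer identity. A hypothetical check, assuming
// package vswitchd scope:

func demoQueueCopySemantics() bool {
	dscp := 46
	orig := &Queue{
		UUID:        "q1",
		DSCP:        &dscp,
		ExternalIDs: map[string]string{"purpose": "voice"},
	}
	clone := orig.DeepCopy()
	// Mutating the clone must not leak into the original.
	*clone.DSCP = 0
	clone.ExternalIDs["purpose"] = "bulk"
	return *orig.DSCP == 46 && !orig.Equals(clone)
}

// --- end aside ---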
+ +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const SFlowTable = "sFlow" + +// SFlow defines an object in sFlow table +type SFlow struct { + UUID string `ovsdb:"_uuid"` + Agent *string `ovsdb:"agent"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Header *int `ovsdb:"header"` + Polling *int `ovsdb:"polling"` + Sampling *int `ovsdb:"sampling"` + Targets []string `ovsdb:"targets"` +} + +func (a *SFlow) GetUUID() string { + return a.UUID +} + +func (a *SFlow) GetAgent() *string { + return a.Agent +} + +func copySFlowAgent(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalSFlowAgent(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *SFlow) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copySFlowExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalSFlowExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *SFlow) GetHeader() *int { + return a.Header +} + +func copySFlowHeader(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalSFlowHeader(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *SFlow) GetPolling() *int { + return a.Polling +} + +func copySFlowPolling(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalSFlowPolling(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *SFlow) GetSampling() *int { + return a.Sampling +} + +func copySFlowSampling(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalSFlowSampling(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *SFlow) GetTargets() []string { + return a.Targets +} + +func copySFlowTargets(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalSFlowTargets(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *SFlow) DeepCopyInto(b *SFlow) { + *b = *a + b.Agent = copySFlowAgent(a.Agent) + b.ExternalIDs = copySFlowExternalIDs(a.ExternalIDs) + b.Header = copySFlowHeader(a.Header) + b.Polling = copySFlowPolling(a.Polling) + b.Sampling = copySFlowSampling(a.Sampling) + b.Targets = copySFlowTargets(a.Targets) +} + +func (a *SFlow) DeepCopy() *SFlow { + b := new(SFlow) + a.DeepCopyInto(b) + return b +} + +func (a *SFlow) CloneModelInto(b model.Model) { + c := b.(*SFlow) + a.DeepCopyInto(c) +} + +func (a *SFlow) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *SFlow) Equals(b *SFlow) bool { + return a.UUID == b.UUID && + equalSFlowAgent(a.Agent, b.Agent) && + equalSFlowExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalSFlowHeader(a.Header, b.Header) && + equalSFlowPolling(a.Polling, b.Polling) && + equalSFlowSampling(a.Sampling, b.Sampling) && + 
equalSFlowTargets(a.Targets, b.Targets) +} + +func (a *SFlow) EqualsModel(b model.Model) bool { + c := b.(*SFlow) + return a.Equals(c) +} + +var _ model.CloneableModel = &SFlow{} +var _ model.ComparableModel = &SFlow{} diff --git a/go-controller/pkg/vswitchd/ssl.go b/go-controller/pkg/vswitchd/ssl.go new file mode 100644 index 0000000000..79c4b1bad4 --- /dev/null +++ b/go-controller/pkg/vswitchd/ssl.go @@ -0,0 +1,105 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package vswitchd + +import "github.com/ovn-org/libovsdb/model" + +const SSLTable = "SSL" + +// SSL defines an object in SSL table +type SSL struct { + UUID string `ovsdb:"_uuid"` + BootstrapCaCert bool `ovsdb:"bootstrap_ca_cert"` + CaCert string `ovsdb:"ca_cert"` + Certificate string `ovsdb:"certificate"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + PrivateKey string `ovsdb:"private_key"` +} + +func (a *SSL) GetUUID() string { + return a.UUID +} + +func (a *SSL) GetBootstrapCaCert() bool { + return a.BootstrapCaCert +} + +func (a *SSL) GetCaCert() string { + return a.CaCert +} + +func (a *SSL) GetCertificate() string { + return a.Certificate +} + +func (a *SSL) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copySSLExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalSSLExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *SSL) GetPrivateKey() string { + return a.PrivateKey +} + +func (a *SSL) DeepCopyInto(b *SSL) { + *b = *a + b.ExternalIDs = copySSLExternalIDs(a.ExternalIDs) +} + +func (a *SSL) DeepCopy() *SSL { + b := new(SSL) + a.DeepCopyInto(b) + return b +} + +func (a *SSL) CloneModelInto(b model.Model) { + c := b.(*SSL) + a.DeepCopyInto(c) +} + +func (a *SSL) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *SSL) Equals(b *SSL) bool { + return a.UUID == b.UUID && + a.BootstrapCaCert == b.BootstrapCaCert && + a.CaCert == b.CaCert && + a.Certificate == b.Certificate && + equalSSLExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.PrivateKey == b.PrivateKey +} + +func (a *SSL) EqualsModel(b model.Model) bool { + c := b.(*SSL) + return a.Equals(c) +} + +var _ model.CloneableModel = &SSL{} +var _ model.ComparableModel = &SSL{} diff --git a/go-controller/pkg/vswitchd/vswitch.ovsschema b/go-controller/pkg/vswitchd/vswitch.ovsschema new file mode 100644 index 0000000000..4873cfde72 --- /dev/null +++ b/go-controller/pkg/vswitchd/vswitch.ovsschema @@ -0,0 +1,720 @@ +{"name": "Open_vSwitch", + "version": "8.3.0", + "cksum": "3781850481 26690", + "tables": { + "Open_vSwitch": { + "columns": { + "datapaths": { + "type": {"key": {"type": "string"}, + "value": {"type": "uuid", + "refTable": "Datapath"}, + "min": 0, "max": "unlimited"}}, + "bridges": { + "type": {"key": {"type": "uuid", + "refTable": "Bridge"}, + "min": 0, "max": "unlimited"}}, + "manager_options": { + "type": {"key": {"type": "uuid", + "refTable": "Manager"}, + "min": 0, "max": "unlimited"}}, + "ssl": { + "type": {"key": {"type": "uuid", + "refTable": "SSL"}, + "min": 0, "max": 1}}, + "other_config": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + 
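// --- Illustrative aside, not part of the generated patch ---
// The schema string embedded in model.go above is modelgen's copy of this
// vswitch.ovsschema file, and Schema() unmarshals it into an
// ovsdb.DatabaseSchema (panicking only if the embedded JSON were corrupt).
// A quick sanity check, assuming package vswitchd scope:

func demoSchemaVersion() bool {
	s := Schema()
	return s.Name == "Open_vSwitch" && s.Version == "8.3.0"
}

// --- end aside ---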
"next_cfg": { + "type": "integer"}, + "cur_cfg": { + "type": "integer"}, + "statistics": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}, + "ephemeral": true}, + "ovs_version": { + "type": {"key": {"type": "string"}, + "min": 0, "max": 1}}, + "db_version": { + "type": {"key": {"type": "string"}, + "min": 0, "max": 1}}, + "system_type": { + "type": {"key": {"type": "string"}, + "min": 0, "max": 1}}, + "system_version": { + "type": {"key": {"type": "string"}, + "min": 0, "max": 1}}, + "datapath_types": { + "type": {"key": {"type": "string"}, + "min": 0, "max": "unlimited"}}, + "iface_types": { + "type": {"key": {"type": "string"}, + "min": 0, "max": "unlimited"}}, + "dpdk_initialized": { + "type": "boolean"}, + "dpdk_version": { + "type": {"key": {"type": "string"}, + "min": 0, "max": 1}}}, + "isRoot": true, + "maxRows": 1}, + "Bridge": { + "columns": { + "name": { + "type": "string", + "mutable": false}, + "datapath_type": { + "type": "string"}, + "datapath_version": { + "type": "string"}, + "datapath_id": { + "type": {"key": "string", "min": 0, "max": 1}, + "ephemeral": true}, + "stp_enable": { + "type": "boolean"}, + "rstp_enable": { + "type": "boolean"}, + "mcast_snooping_enable": { + "type": "boolean"}, + "ports": { + "type": {"key": {"type": "uuid", + "refTable": "Port"}, + "min": 0, "max": "unlimited"}}, + "mirrors": { + "type": {"key": {"type": "uuid", + "refTable": "Mirror"}, + "min": 0, "max": "unlimited"}}, + "netflow": { + "type": {"key": {"type": "uuid", + "refTable": "NetFlow"}, + "min": 0, "max": 1}}, + "sflow": { + "type": {"key": {"type": "uuid", + "refTable": "sFlow"}, + "min": 0, "max": 1}}, + "ipfix": { + "type": {"key": {"type": "uuid", + "refTable": "IPFIX"}, + "min": 0, "max": 1}}, + "controller": { + "type": {"key": {"type": "uuid", + "refTable": "Controller"}, + "min": 0, "max": "unlimited"}}, + "protocols": { + "type": {"key": {"type": "string", + "enum": ["set", ["OpenFlow10", + "OpenFlow11", + "OpenFlow12", + "OpenFlow13", + "OpenFlow14", + "OpenFlow15"]]}, + "min": 0, "max": "unlimited"}}, + "fail_mode": { + "type": {"key": {"type": "string", + "enum": ["set", ["standalone", "secure"]]}, + "min": 0, "max": 1}}, + "status": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}, + "ephemeral": true}, + "rstp_status": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}, + "ephemeral": true}, + "other_config": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "flood_vlans": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 4095}, + "min": 0, "max": 4096}}, + "flow_tables": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 254}, + "value": {"type": "uuid", + "refTable": "Flow_Table"}, + "min": 0, "max": "unlimited"}}, + "auto_attach": { + "type": {"key": {"type": "uuid", + "refTable": "AutoAttach"}, + "min": 0, "max": 1}}}, + "indexes": [["name"]]}, + "Port": { + "columns": { + "name": { + "type": "string", + "mutable": false}, + "interfaces": { + "type": {"key": {"type": "uuid", + "refTable": "Interface"}, + "min": 1, "max": "unlimited"}}, + "trunks": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 4095}, + "min": 0, "max": 4096}}, + "cvlans": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 4095}, + "min": 0, "max": 4096}}, + "tag": { + "type": 
{"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 4095}, + "min": 0, "max": 1}}, + "vlan_mode": { + "type": {"key": {"type": "string", + "enum": ["set", ["trunk", "access", "native-tagged", + "native-untagged", "dot1q-tunnel"]]}, + "min": 0, "max": 1}}, + "qos": { + "type": {"key": {"type": "uuid", + "refTable": "QoS"}, + "min": 0, "max": 1}}, + "mac": { + "type": {"key": {"type": "string"}, + "min": 0, "max": 1}}, + "bond_mode": { + "type": {"key": {"type": "string", + "enum": ["set", ["balance-tcp", "balance-slb", "active-backup"]]}, + "min": 0, "max": 1}}, + "lacp": { + "type": {"key": {"type": "string", + "enum": ["set", ["active", "passive", "off"]]}, + "min": 0, "max": 1}}, + "bond_updelay": { + "type": "integer"}, + "bond_downdelay": { + "type": "integer"}, + "bond_active_slave": { + "type": {"key": {"type": "string"}, + "min": 0, "max": 1}}, + "bond_fake_iface": { + "type": "boolean"}, + "fake_bridge": { + "type": "boolean"}, + "status": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}, + "ephemeral": true}, + "rstp_status": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}, + "ephemeral": true}, + "rstp_statistics": { + "type": {"key": "string", "value": "integer", + "min": 0, "max": "unlimited"}, + "ephemeral": true}, + "statistics": { + "type": {"key": "string", "value": "integer", + "min": 0, "max": "unlimited"}, + "ephemeral": true}, + "protected": { + "type": "boolean"}, + "other_config": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "indexes": [["name"]]}, + "Interface": { + "columns": { + "name": { + "type": "string", + "mutable": false}, + "type": { + "type": "string"}, + "options": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "ingress_policing_rate": { + "type": {"key": {"type": "integer", + "minInteger": 0}}}, + "ingress_policing_burst": { + "type": {"key": {"type": "integer", + "minInteger": 0}}}, + "ingress_policing_kpkts_rate": { + "type": {"key": {"type": "integer", + "minInteger": 0}}}, + "ingress_policing_kpkts_burst": { + "type": {"key": {"type": "integer", + "minInteger": 0}}}, + "mac_in_use": { + "type": {"key": {"type": "string"}, + "min": 0, "max": 1}, + "ephemeral": true}, + "mac": { + "type": {"key": {"type": "string"}, + "min": 0, "max": 1}}, + "ifindex": { + "type": { + "key": {"type": "integer", + "minInteger": 0, + "maxInteger": 4294967295}, + "min": 0, + "max": 1}, + "ephemeral": true}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "ofport": { + "type": {"key": "integer", "min": 0, "max": 1}}, + "ofport_request": { + "type": { + "key": {"type": "integer", + "minInteger": 1, + "maxInteger": 65279}, + "min": 0, + "max": 1}}, + "bfd": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "bfd_status": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "cfm_mpid": { + "type": { + "key": {"type": "integer"}, + "min": 0, + "max": 1}}, + "cfm_remote_mpids": { + "type": { + "key": {"type": "integer"}, + "min": 0, + "max": "unlimited"}, + "ephemeral": true}, + "cfm_flap_count": { + "type": { + "key": {"type": "integer"}, + "min": 0, + "max": 1}}, + "cfm_fault": { + "type": { + "key": { "type": "boolean"}, + "min": 0, + "max": 1}, + "ephemeral": true}, + "cfm_fault_status": { + "type": { + 
"key": "string", "min": 0, "max": "unlimited"}, + "ephemeral": true}, + "cfm_remote_opstate": { + "type": {"key": {"type": "string", + "enum": ["set", ["up", "down"]]}, + "min": 0, "max": 1}, + "ephemeral": true}, + "cfm_health": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 100}, + "min": 0, "max": 1}, + "ephemeral": true}, + "lacp_current": { + "type": {"key": {"type": "boolean"}, + "min": 0, "max": 1}, + "ephemeral": true}, + "lldp": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "other_config": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "statistics": { + "type": {"key": "string", "value": "integer", + "min": 0, "max": "unlimited"}, + "ephemeral": true}, + "status": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}, + "ephemeral": true}, + "admin_state": { + "type": {"key": {"type": "string", + "enum": ["set", ["up", "down"]]}, + "min": 0, "max": 1}, + "ephemeral": true}, + "link_state": { + "type": {"key": {"type": "string", + "enum": ["set", ["up", "down"]]}, + "min": 0, "max": 1}, + "ephemeral": true}, + "link_resets": { + "type": {"key": {"type": "integer"}, + "min": 0, "max": 1}, + "ephemeral": true}, + "link_speed": { + "type": {"key": "integer", "min": 0, "max": 1}, + "ephemeral": true}, + "duplex": { + "type": {"key": {"type": "string", + "enum": ["set", ["half", "full"]]}, + "min": 0, "max": 1}, + "ephemeral": true}, + "mtu": { + "type": {"key": "integer", "min": 0, "max": 1}, + "ephemeral": true}, + "mtu_request": { + "type": { + "key": {"type": "integer", + "minInteger": 1}, + "min": 0, + "max": 1}}, + "error": { + "type": {"key": "string", "min": 0, "max": 1}}}, + "indexes": [["name"]]}, + "Flow_Table": { + "columns": { + "name": { + "type": {"key": "string", "min": 0, "max": 1}}, + "flow_limit": { + "type": {"key": {"type": "integer", "minInteger": 0}, + "min": 0, "max": 1}}, + "overflow_policy": { + "type": {"key": {"type": "string", + "enum": ["set", ["refuse", "evict"]]}, + "min": 0, "max": 1}}, + "groups": { + "type": {"key": "string", "min": 0, "max": "unlimited"}}, + "prefixes": { + "type": {"key": "string", "min": 0, "max": 3}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}}, + "QoS": { + "columns": { + "type": { + "type": "string"}, + "queues": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 4294967295}, + "value": {"type": "uuid", + "refTable": "Queue"}, + "min": 0, "max": "unlimited"}}, + "other_config": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "isRoot": true}, + "Queue": { + "columns": { + "dscp": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 63}, + "min": 0, "max": 1}}, + "other_config": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "isRoot": true}, + "Mirror": { + "columns": { + "name": { + "type": "string"}, + "select_all": { + "type": "boolean"}, + "select_src_port": { + "type": {"key": {"type": "uuid", + "refTable": "Port", + "refType": "weak"}, + "min": 0, "max": "unlimited"}}, + "select_dst_port": { + "type": {"key": {"type": "uuid", + "refTable": "Port", + "refType": "weak"}, + "min": 0, "max": "unlimited"}}, + "select_vlan": { 
+ "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 4095}, + "min": 0, "max": 4096}}, + "output_port": { + "type": {"key": {"type": "uuid", + "refTable": "Port", + "refType": "weak"}, + "min": 0, "max": 1}}, + "output_vlan": { + "type": {"key": {"type": "integer", + "minInteger": 1, + "maxInteger": 4095}, + "min": 0, "max": 1}}, + "snaplen": { + "type": {"key": {"type": "integer", + "minInteger": 14, + "maxInteger": 65535}, + "min": 0, "max": 1}}, + "statistics": { + "type": {"key": "string", "value": "integer", + "min": 0, "max": "unlimited"}, + "ephemeral": true}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}}, + "NetFlow": { + "columns": { + "targets": { + "type": {"key": {"type": "string"}, + "min": 1, "max": "unlimited"}}, + "engine_type": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 255}, + "min": 0, "max": 1}}, + "engine_id": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 255}, + "min": 0, "max": 1}}, + "add_id_to_interface": { + "type": "boolean"}, + "active_timeout": { + "type": {"key": {"type": "integer", + "minInteger": -1}}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}}, + "sFlow": { + "columns": { + "targets": { + "type": {"key": "string", "min": 1, "max": "unlimited"}}, + "sampling": { + "type": {"key": "integer", "min": 0, "max": 1}}, + "polling": { + "type": {"key": "integer", "min": 0, "max": 1}}, + "header": { + "type": {"key": "integer", "min": 0, "max": 1}}, + "agent": { + "type": {"key": "string", "min": 0, "max": 1}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}}, + "IPFIX": { + "columns": { + "targets": { + "type": {"key": "string", "min": 0, "max": "unlimited"}}, + "sampling": { + "type": {"key": {"type": "integer", + "minInteger": 1, + "maxInteger": 4294967295}, + "min": 0, "max": 1}}, + "obs_domain_id": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 4294967295}, + "min": 0, "max": 1}}, + "obs_point_id": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 4294967295}, + "min": 0, "max": 1}}, + "cache_active_timeout": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 4200}, + "min": 0, "max": 1}}, + "cache_max_flows": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 4294967295}, + "min": 0, "max": 1}}, + "other_config": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}}, + "Flow_Sample_Collector_Set": { + "columns": { + "id": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 4294967295}, + "min": 1, "max": 1}}, + "bridge": { + "type": {"key": {"type": "uuid", + "refTable": "Bridge"}, + "min": 1, "max": 1}}, + "ipfix": { + "type": {"key": {"type": "uuid", + "refTable": "IPFIX"}, + "min": 0, "max": 1}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "isRoot": true, + "indexes": [["id", "bridge"]]}, + "Controller": { + "columns": { + "type": { + "type": {"key": {"type": "string", + "enum": ["set", ["primary", "service"]]}, + "min": 0, "max": 1}}, + "target": { + "type": "string"}, + "max_backoff": { + "type": {"key": {"type": "integer", + "minInteger": 1000}, + "min": 0, "max": 1}}, + "inactivity_probe": { + 
"type": {"key": "integer", "min": 0, "max": 1}}, + "connection_mode": { + "type": {"key": {"type": "string", + "enum": ["set", ["in-band", "out-of-band"]]}, + "min": 0, "max": 1}}, + "local_ip": { + "type": {"key": {"type": "string"}, + "min": 0, "max": 1}}, + "local_netmask": { + "type": {"key": {"type": "string"}, + "min": 0, "max": 1}}, + "local_gateway": { + "type": {"key": {"type": "string"}, + "min": 0, "max": 1}}, + "enable_async_messages": { + "type": {"key": {"type": "boolean"}, + "min": 0, "max": 1}}, + "controller_queue_size": { + "type": {"key": {"type": "integer", + "minInteger": 1, + "maxInteger": 512}, + "min": 0, "max": 1}}, + "controller_rate_limit": { + "type": {"key": {"type": "integer", + "minInteger": 100}, + "min": 0, "max": 1}}, + "controller_burst_limit": { + "type": {"key": {"type": "integer", + "minInteger": 25}, + "min": 0, "max": 1}}, + "other_config": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "is_connected": { + "type": "boolean", + "ephemeral": true}, + "role": { + "type": {"key": {"type": "string", + "enum": ["set", ["other", "master", "slave"]]}, + "min": 0, "max": 1}, + "ephemeral": true}, + "status": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}, + "ephemeral": true}}}, + "Manager": { + "columns": { + "target": { + "type": "string"}, + "max_backoff": { + "type": {"key": {"type": "integer", + "minInteger": 1000}, + "min": 0, "max": 1}}, + "inactivity_probe": { + "type": {"key": "integer", "min": 0, "max": 1}}, + "connection_mode": { + "type": {"key": {"type": "string", + "enum": ["set", ["in-band", "out-of-band"]]}, + "min": 0, "max": 1}}, + "other_config": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "is_connected": { + "type": "boolean", + "ephemeral": true}, + "status": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}, + "ephemeral": true}}, + "indexes": [["target"]]}, + "Datapath": { + "columns": { + "datapath_version": { + "type": "string"}, + "ct_zones": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 65535}, + "value": {"type": "uuid", + "refTable": "CT_Zone"}, + "min": 0, "max": "unlimited"}}, + "capabilities": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}}, + "CT_Zone": { + "columns": { + "timeout_policy": { + "type": {"key": {"type": "uuid", + "refTable": "CT_Timeout_Policy"}, + "min": 0, "max": 1}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}}, + "CT_Timeout_Policy": { + "columns": { + "timeouts": { + "type": {"key": {"type" : "string", + "enum": ["set", ["tcp_syn_sent", "tcp_syn_recv", + "tcp_established", "tcp_fin_wait", + "tcp_close_wait", "tcp_last_ack", + "tcp_time_wait", "tcp_close", + "tcp_syn_sent2", "tcp_retransmit", + "tcp_unack", "udp_first", + "udp_single", "udp_multiple", + "icmp_first", "icmp_reply"]]}, + "value": {"type" : "integer", + "minInteger" : 0, + "maxInteger" : 4294967295}, + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}}, + "SSL": { + "columns": { + "private_key": { + 
"type": "string"}, + "certificate": { + "type": "string"}, + "ca_cert": { + "type": "string"}, + "bootstrap_ca_cert": { + "type": "boolean"}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "maxRows": 1}, + "AutoAttach": { + "columns": { + "system_name": { + "type": "string"}, + "system_description": { + "type": "string"}, + "mappings": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 16777215}, + "value": {"type": "integer", + "minInteger": 0, + "maxInteger": 4095}, + "min": 0, "max": "unlimited"}}}}}} diff --git a/go-controller/vendor/github.com/cenkalti/backoff/v4/README.md b/go-controller/vendor/github.com/cenkalti/backoff/v4/README.md index 16abdfc084..9433004a28 100644 --- a/go-controller/vendor/github.com/cenkalti/backoff/v4/README.md +++ b/go-controller/vendor/github.com/cenkalti/backoff/v4/README.md @@ -1,4 +1,4 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls] This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. @@ -21,8 +21,6 @@ Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. [godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4 [godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[travis]: https://travis-ci.org/cenkalti/backoff -[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master [coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master [coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master diff --git a/go-controller/vendor/github.com/cenkalti/backoff/v4/exponential.go b/go-controller/vendor/github.com/cenkalti/backoff/v4/exponential.go index 2c56c1e718..aac99f196a 100644 --- a/go-controller/vendor/github.com/cenkalti/backoff/v4/exponential.go +++ b/go-controller/vendor/github.com/cenkalti/backoff/v4/exponential.go @@ -71,6 +71,9 @@ type Clock interface { Now() time.Time } +// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options. +type ExponentialBackOffOpts func(*ExponentialBackOff) + // Default values for ExponentialBackOff. const ( DefaultInitialInterval = 500 * time.Millisecond @@ -81,7 +84,7 @@ const ( ) // NewExponentialBackOff creates an instance of ExponentialBackOff using default values. -func NewExponentialBackOff() *ExponentialBackOff { +func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff { b := &ExponentialBackOff{ InitialInterval: DefaultInitialInterval, RandomizationFactor: DefaultRandomizationFactor, @@ -91,10 +94,62 @@ func NewExponentialBackOff() *ExponentialBackOff { Stop: Stop, Clock: SystemClock, } + for _, fn := range opts { + fn(b) + } b.Reset() return b } +// WithInitialInterval sets the initial interval between retries. +func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.InitialInterval = duration + } +} + +// WithRandomizationFactor sets the randomization factor to add jitter to intervals. 
+func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.RandomizationFactor = randomizationFactor + } +} + +// WithMultiplier sets the multiplier for increasing the interval after each retry. +func WithMultiplier(multiplier float64) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Multiplier = multiplier + } +} + +// WithMaxInterval sets the maximum interval between retries. +func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.MaxInterval = duration + } +} + +// WithMaxElapsedTime sets the maximum total time for retries. +func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.MaxElapsedTime = duration + } +} + +// WithRetryStopDuration sets the duration after which retries should stop. +func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Stop = duration + } +} + +// WithClockProvider sets the clock used to measure time. +func WithClockProvider(clock Clock) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Clock = clock + } +} + type systemClock struct{} func (t systemClock) Now() time.Time { diff --git a/go-controller/vendor/github.com/cespare/xxhash/v2/README.md b/go-controller/vendor/github.com/cespare/xxhash/v2/README.md index 8bf0e5b781..33c88305c4 100644 --- a/go-controller/vendor/github.com/cespare/xxhash/v2/README.md +++ b/go-controller/vendor/github.com/cespare/xxhash/v2/README.md @@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') - [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) - [FastCache](https://github.com/VictoriaMetrics/fastcache) +- [Ristretto](https://github.com/dgraph-io/ristretto) +- [Badger](https://github.com/dgraph-io/badger) diff --git a/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash.go b/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash.go index a9e0d45c9d..78bddf1cee 100644 --- a/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -19,10 +19,13 @@ const ( // Store the primes in an array as well. // // The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. +// contiguous array for the assembly code. var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. +// +// Note that a zero-valued Digest is not ready to receive writes. +// Call Reset or create a Digest using New before calling other methods. type Digest struct { v1 uint64 v2 uint64 @@ -33,19 +36,31 @@ type Digest struct { n int // how much of mem is used } -// New creates a new Digest that computes the 64-bit xxHash algorithm. +// New creates a new Digest with a zero seed. func New() *Digest { + return NewWithSeed(0) +} + +// NewWithSeed creates a new Digest with the given seed. +func NewWithSeed(seed uint64) *Digest { var d Digest - d.Reset() + d.ResetWithSeed(seed) return &d } // Reset clears the Digest's state so that it can be reused. +// It uses a seed value of zero. func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] + d.ResetWithSeed(0) +} + +// ResetWithSeed clears the Digest's state so that it can be reused. 
+// It uses the given seed to initialize the state. +func (d *Digest) ResetWithSeed(seed uint64) { + d.v1 = seed + prime1 + prime2 + d.v2 = seed + prime2 + d.v3 = seed + d.v4 = seed - prime1 d.total = 0 d.n = 0 } diff --git a/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index 9216e0a40c..78f95f2561 100644 --- a/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go +++ b/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -6,7 +6,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. // //go:noescape func Sum64(b []byte) uint64 diff --git a/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 26df13bba4..118e49e819 100644 --- a/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -3,7 +3,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. func Sum64(b []byte) uint64 { // A simpler version would be // d := New() diff --git a/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index e86f1b5fd8..05f5e7dfe7 100644 --- a/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -5,7 +5,7 @@ package xxhash -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. func Sum64String(s string) uint64 { return Sum64([]byte(s)) } diff --git a/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 1c1638fd88..cf9d42aed5 100644 --- a/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/go-controller/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -33,7 +33,7 @@ import ( // // See https://github.com/golang/go/issues/42739 for discussion. -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. // It may be faster than Sum64([]byte(s)) by avoiding a copy. func Sum64String(s string) uint64 { b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) diff --git a/go-controller/vendor/github.com/coreos/go-systemd/v22/LICENSE b/go-controller/vendor/github.com/coreos/go-systemd/v22/LICENSE new file mode 100644 index 0000000000..37ec93a14f --- /dev/null +++ b/go-controller/vendor/github.com/coreos/go-systemd/v22/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. 
+For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/go-controller/vendor/github.com/coreos/go-systemd/v22/NOTICE b/go-controller/vendor/github.com/coreos/go-systemd/v22/NOTICE new file mode 100644 index 0000000000..23a0ada2fb --- /dev/null +++ b/go-controller/vendor/github.com/coreos/go-systemd/v22/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/go-controller/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/go-controller/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go new file mode 100644 index 0000000000..147f756fe2 --- /dev/null +++ b/go-controller/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go @@ -0,0 +1,266 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/ +package dbus + +import ( + "context" + "encoding/hex" + "fmt" + "os" + "strconv" + "strings" + "sync" + + "github.com/godbus/dbus/v5" +) + +const ( + alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` + num = `0123456789` + alphanum = alpha + num + signalBuffer = 100 +) + +// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped +func needsEscape(i int, b byte) bool { + // Escape everything that is not a-z-A-Z-0-9 + // Also escape 0-9 if it's the first character + return strings.IndexByte(alphanum, b) == -1 || + (i == 0 && strings.IndexByte(num, b) != -1) +} + +// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the +// rules that systemd uses for serializing special characters. +func PathBusEscape(path string) string { + // Special case the empty string + if len(path) == 0 { + return "_" + } + n := []byte{} + for i := 0; i < len(path); i++ { + c := path[i] + if needsEscape(i, c) { + e := fmt.Sprintf("_%x", c) + n = append(n, []byte(e)...) + } else { + n = append(n, c) + } + } + return string(n) +} + +// pathBusUnescape is the inverse of PathBusEscape. +func pathBusUnescape(path string) string { + if path == "_" { + return "" + } + n := []byte{} + for i := 0; i < len(path); i++ { + c := path[i] + if c == '_' && i+2 < len(path) { + res, err := hex.DecodeString(path[i+1 : i+3]) + if err == nil { + n = append(n, res...) + } + i += 2 + } else { + n = append(n, c) + } + } + return string(n) +} + +// Conn is a connection to systemd's dbus endpoint. 
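+// +// Editorial sketch (not part of the upstream file): a typical lifecycle for a +// Conn, assuming a host running systemd, is +// +//	conn, err := dbus.NewWithContext(context.Background()) +//	if err != nil { +//		// handle the error +//	} +//	defer conn.Close()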
+type Conn struct { + // sysconn/sysobj are only used to call dbus methods + sysconn *dbus.Conn + sysobj dbus.BusObject + + // sigconn/sigobj are only used to receive dbus signals + sigconn *dbus.Conn + sigobj dbus.BusObject + + jobListener struct { + jobs map[dbus.ObjectPath]chan<- string + sync.Mutex + } + subStateSubscriber struct { + updateCh chan<- *SubStateUpdate + errCh chan<- error + sync.Mutex + ignore map[dbus.ObjectPath]int64 + cleanIgnore int64 + } + propertiesSubscriber struct { + updateCh chan<- *PropertiesUpdate + errCh chan<- error + sync.Mutex + } +} + +// Deprecated: use NewWithContext instead. +func New() (*Conn, error) { + return NewWithContext(context.Background()) +} + +// NewWithContext establishes a connection to any available bus and authenticates. +// Callers should call Close() when done with the connection. +func NewWithContext(ctx context.Context) (*Conn, error) { + conn, err := NewSystemConnectionContext(ctx) + if err != nil && os.Geteuid() == 0 { + return NewSystemdConnectionContext(ctx) + } + return conn, err +} + +// Deprecated: use NewSystemConnectionContext instead. +func NewSystemConnection() (*Conn, error) { + return NewSystemConnectionContext(context.Background()) +} + +// NewSystemConnectionContext establishes a connection to the system bus and authenticates. +// Callers should call Close() when done with the connection. +func NewSystemConnectionContext(ctx context.Context) (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(ctx, dbus.SystemBusPrivate) + }) +} + +// Deprecated: use NewUserConnectionContext instead. +func NewUserConnection() (*Conn, error) { + return NewUserConnectionContext(context.Background()) +} + +// NewUserConnectionContext establishes a connection to the session bus and +// authenticates. This can be used to connect to systemd user instances. +// Callers should call Close() when done with the connection. +func NewUserConnectionContext(ctx context.Context) (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(ctx, dbus.SessionBusPrivate) + }) +} + +// Deprecated: use NewSystemdConnectionContext instead. +func NewSystemdConnection() (*Conn, error) { + return NewSystemdConnectionContext(context.Background()) +} + +// NewSystemdConnectionContext establishes a private, direct connection to systemd. +// This can be used for communicating with systemd without a dbus daemon. +// Callers should call Close() when done with the connection. +func NewSystemdConnectionContext(ctx context.Context) (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + // We skip Hello when talking directly to systemd. + return dbusAuthConnection(ctx, func(opts ...dbus.ConnOption) (*dbus.Conn, error) { + return dbus.Dial("unix:path=/run/systemd/private", opts...) + }) + }) +} + +// Close closes an established connection. +func (c *Conn) Close() { + c.sysconn.Close() + c.sigconn.Close() +} + +// Connected returns whether conn is connected +func (c *Conn) Connected() bool { + return c.sysconn.Connected() && c.sigconn.Connected() +} + +// NewConnection establishes a connection to a bus using a caller-supplied function. +// This allows connecting to remote buses through a user-supplied mechanism. +// The supplied function may be called multiple times, and should return independent connections. 
+// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded, +// and any authentication should be handled by the function. +func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { + sysconn, err := dialBus() + if err != nil { + return nil, err + } + + sigconn, err := dialBus() + if err != nil { + sysconn.Close() + return nil, err + } + + c := &Conn{ + sysconn: sysconn, + sysobj: systemdObject(sysconn), + sigconn: sigconn, + sigobj: systemdObject(sigconn), + } + + c.subStateSubscriber.ignore = make(map[dbus.ObjectPath]int64) + c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) + + // Setup the listeners on jobs so that we can get completions + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") + + c.dispatch() + return c, nil +} + +// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager +// interface. The value is returned in its string representation, as defined at +// https://developer.gnome.org/glib/unstable/gvariant-text.html. +func (c *Conn) GetManagerProperty(prop string) (string, error) { + variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop) + if err != nil { + return "", err + } + return variant.String(), nil +} + +func dbusAuthConnection(ctx context.Context, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := createBus(dbus.WithContext(ctx)) + if err != nil { + return nil, err + } + + // Only use EXTERNAL method, and hardcode the uid (not username) + // to avoid a username lookup (which requires a dynamically linked + // libc) + methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} + + err = conn.Auth(methods) + if err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func dbusAuthHelloConnection(ctx context.Context, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := dbusAuthConnection(ctx, createBus) + if err != nil { + return nil, err + } + + if err = conn.Hello(); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func systemdObject(conn *dbus.Conn) dbus.BusObject { + return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) +} diff --git a/go-controller/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/go-controller/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go new file mode 100644 index 0000000000..074148cb4d --- /dev/null +++ b/go-controller/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go @@ -0,0 +1,864 @@ +// Copyright 2015, 2018 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
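+ +// Editorial aside (not part of the upstream file): GetManagerProperty above +// returns the GVariant text representation, so querying the systemd version +// might look like +// +//	version, err := conn.GetManagerProperty("Version") +// +// where the result is a quoted string such as "246" (error handling omitted; +// "Version" is a standard org.freedesktop.systemd1.Manager property).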
+ +package dbus + +import ( + "context" + "errors" + "fmt" + "path" + "strconv" + + "github.com/godbus/dbus/v5" +) + +// Who can be used to specify which process to kill in the unit via the KillUnitWithTarget API. +type Who string + +const ( + // All sends the signal to all processes in the unit + All Who = "all" + // Main sends the signal to the main process of the unit + Main Who = "main" + // Control sends the signal to the control process of the unit + Control Who = "control" +) + +func (c *Conn) jobComplete(signal *dbus.Signal) { + var id uint32 + var job dbus.ObjectPath + var unit string + var result string + dbus.Store(signal.Body, &id, &job, &unit, &result) + c.jobListener.Lock() + out, ok := c.jobListener.jobs[job] + if ok { + out <- result + delete(c.jobListener.jobs, job) + } + c.jobListener.Unlock() +} + +func (c *Conn) startJob(ctx context.Context, ch chan<- string, job string, args ...interface{}) (int, error) { + if ch != nil { + c.jobListener.Lock() + defer c.jobListener.Unlock() + } + + var p dbus.ObjectPath + err := c.sysobj.CallWithContext(ctx, job, 0, args...).Store(&p) + if err != nil { + return 0, err + } + + if ch != nil { + c.jobListener.jobs[p] = ch + } + + // ignore error since 0 is fine if conversion fails + jobID, _ := strconv.Atoi(path.Base(string(p))) + + return jobID, nil +} + +// Deprecated: use StartUnitContext instead. +func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.StartUnitContext(context.Background(), name, mode, ch) +} + +// StartUnitContext enqueues a start job and depending jobs, if any (unless otherwise +// specified by the mode string). +// +// Takes the unit to activate, plus a mode string. The mode needs to be one of +// replace, fail, isolate, ignore-dependencies, ignore-requirements. If +// "replace" the call will start the unit and its dependencies, possibly +// replacing already queued jobs that conflict with this. If "fail" the call +// will start the unit and its dependencies, but will fail if this would change +// an already queued job. If "isolate" the call will start the unit in question +// and terminate all units that aren't dependencies of it. If +// "ignore-dependencies" it will start a unit but ignore all its dependencies. +// If "ignore-requirements" it will start a unit but only ignore the +// requirement dependencies. It is not recommended to make use of the latter +// two options. +// +// If the provided channel is non-nil, a result string will be sent to it upon +// job completion: one of done, canceled, timeout, failed, dependency, skipped. +// done indicates successful execution of a job. canceled indicates that a job +// has been canceled before it finished execution. timeout indicates that the +// job timeout was reached. failed indicates that the job failed. dependency +// indicates that a job this job has been depending on failed and the job hence +// has been removed too. skipped indicates that a job was skipped because it +// didn't apply to the unit's current state. +// +// If no error occurs, the ID of the underlying systemd job will be returned. It is +// possible, however, for no error to be returned but for the returned job +// ID to be 0; in this case, the actual underlying ID is not 0 and this datapoint +// should not be considered authoritative. +// +// If an error does occur, it will be returned to the user alongside a job ID of 0.
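+// +// Editorial sketch (not part of the upstream file): with an established *Conn +// named conn, starting a placeholder unit and waiting for the job result +// could look like +// +//	ch := make(chan string, 1) +//	if _, err := conn.StartUnitContext(ctx, "example.service", "replace", ch); err != nil { +//		// handle the error +//	} +//	if result := <-ch; result != "done" { +//		// the job was canceled, timed out, failed, or was skipped +//	}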
+func (c *Conn) StartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode) +} + +// Deprecated: use StopUnitContext instead. +func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) { + return c.StopUnitContext(context.Background(), name, mode, ch) +} + +// StopUnitContext is similar to StartUnitContext, but stops the specified unit +// rather than starting it. +func (c *Conn) StopUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode) +} + +// Deprecated: use ReloadUnitContext instead. +func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) { + return c.ReloadUnitContext(context.Background(), name, mode, ch) +} + +// ReloadUnitContext reloads a unit. Reloading is done only if the unit +// is already running, and fails otherwise. +func (c *Conn) ReloadUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) +} + +// Deprecated: use RestartUnitContext instead. +func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.RestartUnitContext(context.Background(), name, mode, ch) +} + +// RestartUnitContext restarts a service. If a service is restarted that isn't +// running, it will be started. +func (c *Conn) RestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode) +} + +// Deprecated: use TryRestartUnitContext instead. +func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.TryRestartUnitContext(context.Background(), name, mode, ch) +} + +// TryRestartUnitContext is like RestartUnitContext, except that a service that +// isn't running is not affected by the restart. +func (c *Conn) TryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) +} + +// Deprecated: use ReloadOrRestartUnitContext instead. +func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.ReloadOrRestartUnitContext(context.Background(), name, mode, ch) +} + +// ReloadOrRestartUnitContext attempts a reload if the unit supports it and uses +// a restart otherwise. +func (c *Conn) ReloadOrRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) +} + +// Deprecated: use ReloadOrTryRestartUnitContext instead. +func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.ReloadOrTryRestartUnitContext(context.Background(), name, mode, ch) +} + +// ReloadOrTryRestartUnitContext attempts a reload if the unit supports it, +// and uses a "Try" flavored restart otherwise. +func (c *Conn) ReloadOrTryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) +} + +// Deprecated: use StartTransientUnitContext instead.
+func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { + return c.StartTransientUnitContext(context.Background(), name, mode, properties, ch) +} + +// StartTransientUnitContext may be used to create and start a transient unit, which +// will be released as soon as it is not running or referenced anymore or the +// system is rebooted. name is the unit name including suffix, and must be +// unique. mode is the same as in StartUnitContext, properties contains properties +// of the unit. +func (c *Conn) StartTransientUnitContext(ctx context.Context, name string, mode string, properties []Property, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) +} + +// Deprecated: use KillUnitContext instead. +func (c *Conn) KillUnit(name string, signal int32) { + c.KillUnitContext(context.Background(), name, signal) +} + +// KillUnitContext takes the unit name and a UNIX signal number to send. +// All of the unit's processes are killed. +func (c *Conn) KillUnitContext(ctx context.Context, name string, signal int32) { + c.KillUnitWithTarget(ctx, name, All, signal) +} + +// KillUnitWithTarget is like KillUnitContext, but allows you to specify which +// process in the unit to send the signal to. +func (c *Conn) KillUnitWithTarget(ctx context.Context, name string, target Who, signal int32) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.KillUnit", 0, name, string(target), signal).Store() +} + +// Deprecated: use ResetFailedUnitContext instead. +func (c *Conn) ResetFailedUnit(name string) error { + return c.ResetFailedUnitContext(context.Background(), name) +} + +// ResetFailedUnitContext resets the "failed" state of a specific unit. +func (c *Conn) ResetFailedUnitContext(ctx context.Context, name string) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() +} + +// Deprecated: use SystemStateContext instead. +func (c *Conn) SystemState() (*Property, error) { + return c.SystemStateContext(context.Background()) +} + +// SystemStateContext returns the systemd state. Equivalent to +// systemctl is-system-running. +func (c *Conn) SystemStateContext(ctx context.Context) (*Property, error) { + var err error + var prop dbus.Variant + + obj := c.sysconn.Object("org.freedesktop.systemd1", "/org/freedesktop/systemd1") + err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop) + if err != nil { + return nil, err + } + + return &Property{Name: "SystemState", Value: prop}, nil +} + +// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface. +func (c *Conn) getProperties(ctx context.Context, path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) { + var err error + var props map[string]dbus.Variant + + if !path.IsValid() { + return nil, fmt.Errorf("invalid unit name: %v", path) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) + if err != nil { + return nil, err + } + + out := make(map[string]interface{}, len(props)) + for k, v := range props { + out[k] = v.Value() + } + + return out, nil +} + +// Deprecated: use GetUnitPropertiesContext instead. 
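+// +// Editorial sketch (not part of the upstream file): the replacement Context +// variant below is typically called as +// +//	props, err := conn.GetUnitPropertiesContext(ctx, "example.service") +// +// where conn is an established *Conn, "example.service" is a placeholder, and +// props maps property names such as "ActiveState" to plain Go values.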
+func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { + return c.GetUnitPropertiesContext(context.Background(), unit) +} + +// GetUnitPropertiesContext takes the (unescaped) unit name and returns all of +// its dbus object properties. +func (c *Conn) GetUnitPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) { + path := unitPath(unit) + return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit") +} + +// Deprecated: use GetUnitPathPropertiesContext instead. +func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) { + return c.GetUnitPathPropertiesContext(context.Background(), path) +} + +// GetUnitPathPropertiesContext takes the (escaped) unit path and returns all +// of its dbus object properties. +func (c *Conn) GetUnitPathPropertiesContext(ctx context.Context, path dbus.ObjectPath) (map[string]interface{}, error) { + return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit") +} + +// Deprecated: use GetAllPropertiesContext instead. +func (c *Conn) GetAllProperties(unit string) (map[string]interface{}, error) { + return c.GetAllPropertiesContext(context.Background(), unit) +} + +// GetAllPropertiesContext takes the (unescaped) unit name and returns all of +// its dbus object properties. +func (c *Conn) GetAllPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) { + path := unitPath(unit) + return c.getProperties(ctx, path, "") +} + +func (c *Conn) getProperty(ctx context.Context, unit string, dbusInterface string, propertyName string) (*Property, error) { + var err error + var prop dbus.Variant + + path := unitPath(unit) + if !path.IsValid() { + return nil, errors.New("invalid unit name: " + unit) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) + if err != nil { + return nil, err + } + + return &Property{Name: propertyName, Value: prop}, nil +} + +// Deprecated: use GetUnitPropertyContext instead. +func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { + return c.GetUnitPropertyContext(context.Background(), unit, propertyName) +} + +// GetUnitPropertyContext takes an (unescaped) unit name, and a property name, +// and returns the property value. +func (c *Conn) GetUnitPropertyContext(ctx context.Context, unit string, propertyName string) (*Property, error) { + return c.getProperty(ctx, unit, "org.freedesktop.systemd1.Unit", propertyName) +} + +// Deprecated: use GetServicePropertyContext instead. +func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) { + return c.GetServicePropertyContext(context.Background(), service, propertyName) +} + +// GetServicePropertyContext returns the property for the given service name and property name. +func (c *Conn) GetServicePropertyContext(ctx context.Context, service string, propertyName string) (*Property, error) { + return c.getProperty(ctx, service, "org.freedesktop.systemd1.Service", propertyName) +} + +// Deprecated: use GetUnitTypePropertiesContext instead. +func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { + return c.GetUnitTypePropertiesContext(context.Background(), unit, unitType) +} + +// GetUnitTypePropertiesContext returns the extra properties for a unit, specific to the unit type.
+// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope. +// Returns "dbus.Error: Unknown interface" error if the unitType is not the correct type of the unit. +func (c *Conn) GetUnitTypePropertiesContext(ctx context.Context, unit string, unitType string) (map[string]interface{}, error) { + path := unitPath(unit) + return c.getProperties(ctx, path, "org.freedesktop.systemd1."+unitType) +} + +// Deprecated: use SetUnitPropertiesContext instead. +func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { + return c.SetUnitPropertiesContext(context.Background(), name, runtime, properties...) +} + +// SetUnitPropertiesContext may be used to modify certain unit properties at runtime. +// Not all properties may be changed at runtime, but many resource management +// settings (primarily those in systemd.cgroup(5)) may. The changes are applied +// instantly, and stored on disk for future boots, unless runtime is true, in which +// case the settings only apply until the next reboot. name is the name of the unit +// to modify. properties are the settings to set, encoded as an array of property +// name and value pairs. +func (c *Conn) SetUnitPropertiesContext(ctx context.Context, name string, runtime bool, properties ...Property) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() +} + +// Deprecated: use GetUnitTypePropertyContext instead. +func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { + return c.GetUnitTypePropertyContext(context.Background(), unit, unitType, propertyName) +} + +// GetUnitTypePropertyContext takes a property name, a unit name, and a unit type, +// and returns a property value. For valid values of unitType, see GetUnitTypePropertiesContext. +func (c *Conn) GetUnitTypePropertyContext(ctx context.Context, unit string, unitType string, propertyName string) (*Property, error) { + return c.getProperty(ctx, unit, "org.freedesktop.systemd1."+unitType, propertyName) +} + +type UnitStatus struct { + Name string // The primary unit name as string + Description string // The human readable description string + LoadState string // The load state (i.e. whether the unit file has been loaded successfully) + ActiveState string // The active state (i.e. whether the unit is currently started or not) + SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not) + Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string. + Path dbus.ObjectPath // The unit object path + JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise + JobType string // The job type as string + JobPath dbus.ObjectPath // The job object path +} + +type storeFunc func(retvalues ...interface{}) error + +func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { + result := make([][]interface{}, 0) + err := f(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + status := make([]UnitStatus, len(result)) + statusInterface := make([]interface{}, len(status)) + for i := range status { + statusInterface[i] = &status[i] + } + + err = dbus.Store(resultInterface, statusInterface...) 
+	if err != nil {
+		return nil, err
+	}
+
+	return status, nil
+}
+
+// GetUnitByPID returns the unit object path of the unit a process ID
+// belongs to. It takes a UNIX PID and returns the object path. The PID must
+// refer to an existing system process.
+func (c *Conn) GetUnitByPID(ctx context.Context, pid uint32) (dbus.ObjectPath, error) {
+	var result dbus.ObjectPath
+
+	err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.GetUnitByPID", 0, pid).Store(&result)
+
+	return result, err
+}
+
+// GetUnitNameByPID returns the name of the unit a process ID belongs to. It
+// takes a UNIX PID and returns the unit name. The PID must refer to an
+// existing system process.
+func (c *Conn) GetUnitNameByPID(ctx context.Context, pid uint32) (string, error) {
+	path, err := c.GetUnitByPID(ctx, pid)
+	if err != nil {
+		return "", err
+	}
+
+	return unitName(path), nil
+}
+
+// Deprecated: use ListUnitsContext instead.
+func (c *Conn) ListUnits() ([]UnitStatus, error) {
+	return c.ListUnitsContext(context.Background())
+}
+
+// ListUnitsContext returns an array with all currently loaded units. Note that
+// units may be known by multiple names at the same time, and hence there might
+// be more unit names loaded than actual units behind them.
+// Also note that a unit is only loaded if it is active and/or enabled.
+// Units that are both disabled and inactive will thus not be returned.
+func (c *Conn) ListUnitsContext(ctx context.Context) ([]UnitStatus, error) {
+	return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnits", 0).Store)
+}
+
+// Deprecated: use ListUnitsFilteredContext instead.
+func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) {
+	return c.ListUnitsFilteredContext(context.Background(), states)
+}
+
+// ListUnitsFilteredContext returns an array with units filtered by state.
+// It takes a list of units' statuses to filter.
+func (c *Conn) ListUnitsFilteredContext(ctx context.Context, states []string) ([]UnitStatus, error) {
+	return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store)
+}
+
+// Deprecated: use ListUnitsByPatternsContext instead.
+func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) {
+	return c.ListUnitsByPatternsContext(context.Background(), states, patterns)
+}
+
+// ListUnitsByPatternsContext returns an array with units.
+// It takes a list of units' statuses and names to filter.
+// Note that units may be known by multiple names at the same time,
+// and hence there might be more unit names loaded than actual units behind them.
+func (c *Conn) ListUnitsByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitStatus, error) {
+	return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store)
+}
+
+// Deprecated: use ListUnitsByNamesContext instead.
+func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) {
+	return c.ListUnitsByNamesContext(context.Background(), units)
+}
+
+// ListUnitsByNamesContext returns an array with units. It takes a list of units'
+// names and returns an UnitStatus array. Compared to the ListUnitsByPatternsContext
+// method, this method returns statuses even for inactive or non-existing
+// units. The input array should contain exact unit names, not patterns.
+//
+// Requires systemd v230 or higher.
+func (c *Conn) ListUnitsByNamesContext(ctx context.Context, units []string) ([]UnitStatus, error) {
+	return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store)
+}
+
+type UnitFile struct {
+	Path string
+	Type string
+}
+
+func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) {
+	result := make([][]interface{}, 0)
+	err := f(&result)
+	if err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	files := make([]UnitFile, len(result))
+	fileInterface := make([]interface{}, len(files))
+	for i := range files {
+		fileInterface[i] = &files[i]
+	}
+
+	err = dbus.Store(resultInterface, fileInterface...)
+	if err != nil {
+		return nil, err
+	}
+
+	return files, nil
+}
+
+// Deprecated: use ListUnitFilesContext instead.
+func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
+	return c.ListUnitFilesContext(context.Background())
+}
+
+// ListUnitFilesContext returns an array of all available unit files on disk.
+func (c *Conn) ListUnitFilesContext(ctx context.Context) ([]UnitFile, error) {
+	return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store)
+}
+
+// Deprecated: use ListUnitFilesByPatternsContext instead.
+func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) {
+	return c.ListUnitFilesByPatternsContext(context.Background(), states, patterns)
+}
+
+// ListUnitFilesByPatternsContext returns an array of all available unit files on disk matching the patterns.
+func (c *Conn) ListUnitFilesByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitFile, error) {
+	return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store)
+}
+
+type LinkUnitFileChange EnableUnitFileChange
+
+// Deprecated: use LinkUnitFilesContext instead.
+func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
+	return c.LinkUnitFilesContext(context.Background(), files, runtime, force)
+}
+
+// LinkUnitFilesContext links unit files (that are located outside of the
+// usual unit search paths) into the unit search path.
+//
+// It takes a list of absolute paths to unit files to link and two
+// booleans.
+//
+// The first boolean controls whether the unit shall be
+// enabled for runtime only (true, /run), or persistently (false,
+// /etc).
+//
+// The second controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns a list of the changes made. The list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) LinkUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
+	result := make([][]interface{}, 0)
+	err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
+	if err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	changes := make([]LinkUnitFileChange, len(result))
+	changesInterface := make([]interface{}, len(changes))
+	for i := range changes {
+		changesInterface[i] = &changes[i]
+	}
+
+	err = dbus.Store(resultInterface, changesInterface...)
+	if err != nil {
+		return nil, err
+	}
+
+	return changes, nil
+}
+
+// Deprecated: use EnableUnitFilesContext instead.
+func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
+	return c.EnableUnitFilesContext(context.Background(), files, runtime, force)
+}
+
+// EnableUnitFilesContext may be used to enable one or more units in the system
+// (by creating symlinks to them in /etc or /run).
+//
+// It takes a list of unit files to enable (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit
+// search paths), and two booleans: the first controls whether the unit shall
+// be enabled for runtime only (true, /run), or persistently (false, /etc).
+// The second one controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns one boolean and an array with the changes made. The
+// boolean signals whether the unit files contained any enablement
+// information (i.e. an [Install] section). The changes list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) EnableUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
+	var carries_install_info bool
+
+	result := make([][]interface{}, 0)
+	err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result)
+	if err != nil {
+		return false, nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	changes := make([]EnableUnitFileChange, len(result))
+	changesInterface := make([]interface{}, len(changes))
+	for i := range changes {
+		changesInterface[i] = &changes[i]
+	}
+
+	err = dbus.Store(resultInterface, changesInterface...)
+	if err != nil {
+		return false, nil, err
+	}
+
+	return carries_install_info, changes, nil
+}
+
+type EnableUnitFileChange struct {
+	Type        string // Type of the change (one of symlink or unlink)
+	Filename    string // File name of the symlink
+	Destination string // Destination of the symlink
+}
+
+// Deprecated: use DisableUnitFilesContext instead.
+func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
+	return c.DisableUnitFilesContext(context.Background(), files, runtime)
+}
+
+// DisableUnitFilesContext may be used to disable one or more units in the
+// system (by removing symlinks to them from /etc or /run).
+// +// It takes a list of unit files to disable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and one boolean: whether the unit was enabled for runtime +// only (true, /run), or persistently (false, /etc). +// +// This call returns an array with the changes made. The changes list +// consists of structures with three strings: the type of the change (one of +// symlink or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) DisableUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]DisableUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]DisableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type DisableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// Deprecated: use MaskUnitFilesContext instead. +func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { + return c.MaskUnitFilesContext(context.Background(), files, runtime, force) +} + +// MaskUnitFilesContext masks one or more units in the system. +// +// The files argument contains a list of units to mask (either just file names +// or full absolute paths if the unit files are residing outside the usual unit +// search paths). +// +// The runtime argument is used to specify whether the unit was enabled for +// runtime only (true, /run/systemd/..), or persistently (false, +// /etc/systemd/..). +func (c *Conn) MaskUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]MaskUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type MaskUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// Deprecated: use UnmaskUnitFilesContext instead. +func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) { + return c.UnmaskUnitFilesContext(context.Background(), files, runtime) +} + +// UnmaskUnitFilesContext unmasks one or more units in the system. 
+//
+// It takes the list of unit files to unmask (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit search
+// paths), and a boolean runtime flag to specify whether the unit was enabled
+// for runtime only (true, /run/systemd/..), or persistently (false,
+// /etc/systemd/..).
+func (c *Conn) UnmaskUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]UnmaskUnitFileChange, error) {
+	result := make([][]interface{}, 0)
+	err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result)
+	if err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	changes := make([]UnmaskUnitFileChange, len(result))
+	changesInterface := make([]interface{}, len(changes))
+	for i := range changes {
+		changesInterface[i] = &changes[i]
+	}
+
+	err = dbus.Store(resultInterface, changesInterface...)
+	if err != nil {
+		return nil, err
+	}
+
+	return changes, nil
+}
+
+type UnmaskUnitFileChange struct {
+	Type        string // Type of the change (one of symlink or unlink)
+	Filename    string // File name of the symlink
+	Destination string // Destination of the symlink
+}
+
+// Deprecated: use ReloadContext instead.
+func (c *Conn) Reload() error {
+	return c.ReloadContext(context.Background())
+}
+
+// ReloadContext instructs systemd to scan for and reload unit files. This is
+// equivalent to systemctl daemon-reload.
+func (c *Conn) ReloadContext(ctx context.Context) error {
+	return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.Reload", 0).Store()
+}
+
+func unitPath(name string) dbus.ObjectPath {
+	return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name))
+}
+
+// unitName returns the unescaped base element of the supplied escaped path.
+func unitName(dpath dbus.ObjectPath) string {
+	return pathBusUnescape(path.Base(string(dpath)))
+}
+
+// JobStatus holds a currently queued job definition.
+type JobStatus struct {
+	Id       uint32          // The numeric job id
+	Unit     string          // The primary unit name for this job
+	JobType  string          // The job type as string
+	Status   string          // The job state as string
+	JobPath  dbus.ObjectPath // The job object path
+	UnitPath dbus.ObjectPath // The unit object path
+}
+
+// Deprecated: use ListJobsContext instead.
+func (c *Conn) ListJobs() ([]JobStatus, error) {
+	return c.ListJobsContext(context.Background())
+}
+
+// ListJobsContext returns an array with all currently queued jobs.
+func (c *Conn) ListJobsContext(ctx context.Context) ([]JobStatus, error) {
+	return c.listJobsInternal(ctx)
+}
+
+func (c *Conn) listJobsInternal(ctx context.Context) ([]JobStatus, error) {
+	result := make([][]interface{}, 0)
+	if err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListJobs", 0).Store(&result); err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	status := make([]JobStatus, len(result))
+	statusInterface := make([]interface{}, len(status))
+	for i := range status {
+		statusInterface[i] = &status[i]
+	}
+
+	if err := dbus.Store(resultInterface, statusInterface...); err != nil {
+		return nil, err
+	}
+
+	return status, nil
+}
+
+// Freeze the cgroup associated with the unit.
+// Note that FreezeUnit and ThawUnit are only supported on systems running with cgroup v2.
+func (c *Conn) FreezeUnit(ctx context.Context, unit string) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.FreezeUnit", 0, unit).Store() +} + +// Unfreeze the cgroup associated with the unit. +func (c *Conn) ThawUnit(ctx context.Context, unit string) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ThawUnit", 0, unit).Store() +} diff --git a/go-controller/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go b/go-controller/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go new file mode 100644 index 0000000000..fb42b62733 --- /dev/null +++ b/go-controller/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go @@ -0,0 +1,237 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "github.com/godbus/dbus/v5" +) + +// From the systemd docs: +// +// The properties array of StartTransientUnit() may take many of the settings +// that may also be configured in unit files. Not all parameters are currently +// accepted though, but we plan to cover more properties with future release. +// Currently you may set the Description, Slice and all dependency types of +// units, as well as RemainAfterExit, ExecStart for service units, +// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares, +// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth, +// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit, +// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map +// directly to their counterparts in unit files and as normal D-Bus object +// properties. The exception here is the PIDs field of scope units which is +// used for construction of the scope only and specifies the initial PIDs to +// add to the scope object. + +type Property struct { + Name string + Value dbus.Variant +} + +type PropertyCollection struct { + Name string + Properties []Property +} + +type execStart struct { + Path string // the binary path to execute + Args []string // an array with all arguments to pass to the executed command, starting with argument 0 + UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly +} + +// PropExecStart sets the ExecStart service property. The first argument is a +// slice with the binary path to execute followed by the arguments to pass to +// the executed command. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= +func PropExecStart(command []string, uncleanIsFailure bool) Property { + execStarts := []execStart{ + { + Path: command[0], + Args: command, + UncleanIsFailure: uncleanIsFailure, + }, + } + + return Property{ + Name: "ExecStart", + Value: dbus.MakeVariant(execStarts), + } +} + +// PropRemainAfterExit sets the RemainAfterExit service property. 
See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
+func PropRemainAfterExit(b bool) Property {
+	return Property{
+		Name:  "RemainAfterExit",
+		Value: dbus.MakeVariant(b),
+	}
+}
+
+// PropType sets the Type service property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type=
+func PropType(t string) Property {
+	return Property{
+		Name:  "Type",
+		Value: dbus.MakeVariant(t),
+	}
+}
+
+// PropDescription sets the Description unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Description=
+func PropDescription(desc string) Property {
+	return Property{
+		Name:  "Description",
+		Value: dbus.MakeVariant(desc),
+	}
+}
+
+func propDependency(name string, units []string) Property {
+	return Property{
+		Name:  name,
+		Value: dbus.MakeVariant(units),
+	}
+}
+
+// PropRequires sets the Requires unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
+func PropRequires(units ...string) Property {
+	return propDependency("Requires", units)
+}
+
+// PropRequiresOverridable sets the RequiresOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
+func PropRequiresOverridable(units ...string) Property {
+	return propDependency("RequiresOverridable", units)
+}
+
+// PropRequisite sets the Requisite unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
+func PropRequisite(units ...string) Property {
+	return propDependency("Requisite", units)
+}
+
+// PropRequisiteOverridable sets the RequisiteOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
+func PropRequisiteOverridable(units ...string) Property {
+	return propDependency("RequisiteOverridable", units)
+}
+
+// PropWants sets the Wants unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
+func PropWants(units ...string) Property {
+	return propDependency("Wants", units)
+}
+
+// PropBindsTo sets the BindsTo unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
+func PropBindsTo(units ...string) Property {
+	return propDependency("BindsTo", units)
+}
+
+// PropRequiredBy sets the RequiredBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
+func PropRequiredBy(units ...string) Property {
+	return propDependency("RequiredBy", units)
+}
+
+// PropRequiredByOverridable sets the RequiredByOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
+func PropRequiredByOverridable(units ...string) Property {
+	return propDependency("RequiredByOverridable", units)
+}
+
+// PropWantedBy sets the WantedBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
+func PropWantedBy(units ...string) Property {
+	return propDependency("WantedBy", units)
+}
+
+// PropBoundBy sets the BoundBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy=
+func PropBoundBy(units ...string) Property {
+	return propDependency("BoundBy", units)
+}
+
+// PropConflicts sets the Conflicts unit property.
See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts= +func PropConflicts(units ...string) Property { + return propDependency("Conflicts", units) +} + +// PropConflictedBy sets the ConflictedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy= +func PropConflictedBy(units ...string) Property { + return propDependency("ConflictedBy", units) +} + +// PropBefore sets the Before unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before= +func PropBefore(units ...string) Property { + return propDependency("Before", units) +} + +// PropAfter sets the After unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After= +func PropAfter(units ...string) Property { + return propDependency("After", units) +} + +// PropOnFailure sets the OnFailure unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure= +func PropOnFailure(units ...string) Property { + return propDependency("OnFailure", units) +} + +// PropTriggers sets the Triggers unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers= +func PropTriggers(units ...string) Property { + return propDependency("Triggers", units) +} + +// PropTriggeredBy sets the TriggeredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy= +func PropTriggeredBy(units ...string) Property { + return propDependency("TriggeredBy", units) +} + +// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= +func PropPropagatesReloadTo(units ...string) Property { + return propDependency("PropagatesReloadTo", units) +} + +// PropRequiresMountsFor sets the RequiresMountsFor unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= +func PropRequiresMountsFor(units ...string) Property { + return propDependency("RequiresMountsFor", units) +} + +// PropSlice sets the Slice unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= +func PropSlice(slice string) Property { + return Property{ + Name: "Slice", + Value: dbus.MakeVariant(slice), + } +} + +// PropPids sets the PIDs field of scope units used in the initial construction +// of the scope only and specifies the initial PIDs to add to the scope object. +// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties +func PropPids(pids ...uint32) Property { + return Property{ + Name: "PIDs", + Value: dbus.MakeVariant(pids), + } +} diff --git a/go-controller/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go b/go-controller/vendor/github.com/coreos/go-systemd/v22/dbus/set.go similarity index 51% rename from go-controller/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go rename to go-controller/vendor/github.com/coreos/go-systemd/v22/dbus/set.go index c318385cbe..17c5d48565 100644 --- a/go-controller/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go +++ b/go-controller/vendor/github.com/coreos/go-systemd/v22/dbus/set.go @@ -1,4 +1,4 @@ -// Copyright 2013 Matt T. Proud +// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -12,5 +12,36 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package pbutil provides record length-delimited Protocol Buffer streaming. -package pbutil +package dbus + +type set struct { + data map[string]bool +} + +func (s *set) Add(value string) { + s.data[value] = true +} + +func (s *set) Remove(value string) { + delete(s.data, value) +} + +func (s *set) Contains(value string) (exists bool) { + _, exists = s.data[value] + return +} + +func (s *set) Length() int { + return len(s.data) +} + +func (s *set) Values() (values []string) { + for val := range s.data { + values = append(values, val) + } + return +} + +func newSet() *set { + return &set{make(map[string]bool)} +} diff --git a/go-controller/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go b/go-controller/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go new file mode 100644 index 0000000000..7e370fea21 --- /dev/null +++ b/go-controller/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go @@ -0,0 +1,333 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "errors" + "log" + "time" + + "github.com/godbus/dbus/v5" +) + +const ( + cleanIgnoreInterval = int64(10 * time.Second) + ignoreInterval = int64(30 * time.Millisecond) +) + +// Subscribe sets up this connection to subscribe to all systemd dbus events. +// This is required before calling SubscribeUnits. When the connection closes +// systemd will automatically stop sending signals so there is no need to +// explicitly call Unsubscribe(). +func (c *Conn) Subscribe() error { + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") + + return c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() +} + +// Unsubscribe this connection from systemd dbus events. 
+func (c *Conn) Unsubscribe() error { + return c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() +} + +func (c *Conn) dispatch() { + ch := make(chan *dbus.Signal, signalBuffer) + + c.sigconn.Signal(ch) + + go func() { + for { + signal, ok := <-ch + if !ok { + return + } + + if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { + c.jobComplete(signal) + } + + if c.subStateSubscriber.updateCh == nil && + c.propertiesSubscriber.updateCh == nil { + continue + } + + var unitPath dbus.ObjectPath + switch signal.Name { + case "org.freedesktop.systemd1.Manager.JobRemoved": + unitName := signal.Body[2].(string) + c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) + case "org.freedesktop.systemd1.Manager.UnitNew": + unitPath = signal.Body[1].(dbus.ObjectPath) + case "org.freedesktop.DBus.Properties.PropertiesChanged": + if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { + unitPath = signal.Path + + if len(signal.Body) >= 2 { + if changed, ok := signal.Body[1].(map[string]dbus.Variant); ok { + c.sendPropertiesUpdate(unitPath, changed) + } + } + } + } + + if unitPath == dbus.ObjectPath("") { + continue + } + + c.sendSubStateUpdate(unitPath) + } + }() +} + +// SubscribeUnits returns two unbuffered channels which will receive all changed units every +// interval. Deleted units are sent as nil. +func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { + return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) +} + +// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer +// size of the channels, the comparison function for detecting changes and a filter +// function for cutting down on the noise that your channel receives. +func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { + old := make(map[string]*UnitStatus) + statusChan := make(chan map[string]*UnitStatus, buffer) + errChan := make(chan error, buffer) + + go func() { + for { + timerChan := time.After(interval) + + units, err := c.ListUnits() + if err == nil { + cur := make(map[string]*UnitStatus) + for i := range units { + if filterUnit != nil && filterUnit(units[i].Name) { + continue + } + cur[units[i].Name] = &units[i] + } + + // add all new or changed units + changed := make(map[string]*UnitStatus) + for n, u := range cur { + if oldU, ok := old[n]; !ok || isChanged(oldU, u) { + changed[n] = u + } + delete(old, n) + } + + // add all deleted units + for oldN := range old { + changed[oldN] = nil + } + + old = cur + + if len(changed) != 0 { + statusChan <- changed + } + } else { + errChan <- err + } + + <-timerChan + } + }() + + return statusChan, errChan +} + +type SubStateUpdate struct { + UnitName string + SubState string +} + +// SetSubStateSubscriber writes to updateCh when any unit's substate changes. +// Although this writes to updateCh on every state change, the reported state +// may be more recent than the change that generated it (due to an unavoidable +// race in the systemd dbus interface). That is, this method provides a good +// way to keep a current view of all units' states, but is not guaranteed to +// show every state transition they go through. Furthermore, state changes +// will only be written to the channel with non-blocking writes. 
If updateCh
+// is full, it attempts to write an error to errCh; if errCh is full, the error
+// passes silently.
+func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
+	if c == nil {
+		msg := "nil receiver"
+		select {
+		case errCh <- errors.New(msg):
+		default:
+			log.Printf("full error channel while reporting: %s\n", msg)
+		}
+		return
+	}
+
+	c.subStateSubscriber.Lock()
+	defer c.subStateSubscriber.Unlock()
+	c.subStateSubscriber.updateCh = updateCh
+	c.subStateSubscriber.errCh = errCh
+}
+
+func (c *Conn) sendSubStateUpdate(unitPath dbus.ObjectPath) {
+	c.subStateSubscriber.Lock()
+	defer c.subStateSubscriber.Unlock()
+
+	if c.subStateSubscriber.updateCh == nil {
+		return
+	}
+
+	isIgnored := c.shouldIgnore(unitPath)
+	defer c.cleanIgnore()
+	if isIgnored {
+		return
+	}
+
+	info, err := c.GetUnitPathProperties(unitPath)
+	if err != nil {
+		select {
+		case c.subStateSubscriber.errCh <- err:
+		default:
+			log.Printf("full error channel while reporting: %s\n", err)
+		}
+		return
+	}
+	defer c.updateIgnore(unitPath, info)
+
+	name, ok := info["Id"].(string)
+	if !ok {
+		msg := "failed to cast info.Id"
+		select {
+		case c.subStateSubscriber.errCh <- errors.New(msg):
+		default:
+			log.Printf("full error channel while reporting: %s\n", msg)
+		}
+		return
+	}
+	substate, ok := info["SubState"].(string)
+	if !ok {
+		msg := "failed to cast info.SubState"
+		select {
+		case c.subStateSubscriber.errCh <- errors.New(msg):
+		default:
+			log.Printf("full error channel while reporting: %s\n", msg)
+		}
+		return
+	}
+
+	update := &SubStateUpdate{name, substate}
+	select {
+	case c.subStateSubscriber.updateCh <- update:
+	default:
+		msg := "update channel is full"
+		select {
+		case c.subStateSubscriber.errCh <- errors.New(msg):
+		default:
+			log.Printf("full error channel while reporting: %s\n", msg)
+		}
+		return
+	}
+}
+
+// The ignore functions work around a wart in the systemd dbus interface.
+// Requesting the properties of an unloaded unit will cause systemd to send a
+// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
+// properties on UnitNew (as that's the only indication of a new unit coming up
+// for the first time), we would enter an infinite loop if we did not attempt
+// to detect and ignore these spurious signals. The signals themselves are
+// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
+// unloaded unit's signals for a short time after requesting its properties.
+// This means that we will miss e.g. a transient unit being restarted
+// *immediately* upon failure and also a transient unit being started
+// immediately after requesting its status (with systemctl status, for example,
+// because this causes a UnitNew signal to be sent which then causes us to fetch
+// the properties).
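+
+// Illustrative usage sketch (not part of the upstream file; the variable names
+// are assumptions): wiring up the subscriber API above with buffered channels,
+// since updates are delivered with the non-blocking writes described earlier.
+//
+//	conn, _ := NewSystemConnectionContext(context.Background())
+//	_ = conn.Subscribe() // required before any signals are delivered
+//	updates := make(chan *SubStateUpdate, 256)
+//	errs := make(chan error, 16)
+//	conn.SetSubStateSubscriber(updates, errs)
+//	for u := range updates {
+//		log.Printf("%s is now %s", u.UnitName, u.SubState)
+//	}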
+ +func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool { + t, ok := c.subStateSubscriber.ignore[path] + return ok && t >= time.Now().UnixNano() +} + +func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) { + loadState, ok := info["LoadState"].(string) + if !ok { + return + } + + // unit is unloaded - it will trigger bad systemd dbus behavior + if loadState == "not-found" { + c.subStateSubscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval + } +} + +// without this, ignore would grow unboundedly over time +func (c *Conn) cleanIgnore() { + now := time.Now().UnixNano() + if c.subStateSubscriber.cleanIgnore < now { + c.subStateSubscriber.cleanIgnore = now + cleanIgnoreInterval + + for p, t := range c.subStateSubscriber.ignore { + if t < now { + delete(c.subStateSubscriber.ignore, p) + } + } + } +} + +// PropertiesUpdate holds a map of a unit's changed properties +type PropertiesUpdate struct { + UnitName string + Changed map[string]dbus.Variant +} + +// SetPropertiesSubscriber writes to updateCh when any unit's properties +// change. Every property change reported by systemd will be sent; that is, no +// transitions will be "missed" (as they might be with SetSubStateSubscriber). +// However, state changes will only be written to the channel with non-blocking +// writes. If updateCh is full, it attempts to write an error to errCh; if +// errCh is full, the error passes silently. +func (c *Conn) SetPropertiesSubscriber(updateCh chan<- *PropertiesUpdate, errCh chan<- error) { + c.propertiesSubscriber.Lock() + defer c.propertiesSubscriber.Unlock() + c.propertiesSubscriber.updateCh = updateCh + c.propertiesSubscriber.errCh = errCh +} + +// we don't need to worry about shouldIgnore() here because +// sendPropertiesUpdate doesn't call GetProperties() +func (c *Conn) sendPropertiesUpdate(unitPath dbus.ObjectPath, changedProps map[string]dbus.Variant) { + c.propertiesSubscriber.Lock() + defer c.propertiesSubscriber.Unlock() + + if c.propertiesSubscriber.updateCh == nil { + return + } + + update := &PropertiesUpdate{unitName(unitPath), changedProps} + + select { + case c.propertiesSubscriber.updateCh <- update: + default: + msg := "update channel is full" + select { + case c.propertiesSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } +} diff --git a/go-controller/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go b/go-controller/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go new file mode 100644 index 0000000000..5b408d5847 --- /dev/null +++ b/go-controller/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go @@ -0,0 +1,57 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "time" +) + +// SubscriptionSet returns a subscription set which is like conn.Subscribe but +// can filter to only return events for a set of units. 
+type SubscriptionSet struct {
+	*set
+	conn *Conn
+}
+
+func (s *SubscriptionSet) filter(unit string) bool {
+	return !s.Contains(unit)
+}
+
+// Subscribe starts listening for dbus events for all of the units in the set.
+// Returns channels identical to conn.SubscribeUnits.
+func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) {
+	// TODO: Make fully evented by using systemd 209 with properties changed values
+	return s.conn.SubscribeUnitsCustom(time.Second, 0,
+		mismatchUnitStatus,
+		func(unit string) bool { return s.filter(unit) },
+	)
+}
+
+// NewSubscriptionSet returns a new subscription set.
+func (conn *Conn) NewSubscriptionSet() *SubscriptionSet {
+	return &SubscriptionSet{newSet(), conn}
+}
+
+// mismatchUnitStatus returns true if the provided UnitStatus objects
+// are not equivalent. false is returned if the objects are equivalent.
+// Only the Name, Description and state-related fields are used in
+// the comparison.
+func mismatchUnitStatus(u1, u2 *UnitStatus) bool {
+	return u1.Name != u2.Name ||
+		u1.Description != u2.Description ||
+		u1.LoadState != u2.LoadState ||
+		u1.ActiveState != u2.ActiveState ||
+		u1.SubState != u2.SubState
+}
diff --git a/go-controller/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/go-controller/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
index 4b19188d90..8a290f1972 100644
--- a/go-controller/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
+++ b/go-controller/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
@@ -1,6 +1,7 @@
 package md2man
 
 import (
+	"bufio"
 	"bytes"
 	"fmt"
 	"io"
@@ -21,34 +22,35 @@ type roffRenderer struct {
 }
 
 const (
-	titleHeader      = ".TH "
-	topLevelHeader   = "\n\n.SH "
-	secondLevelHdr   = "\n.SH "
-	otherHeader      = "\n.SS "
-	crTag            = "\n"
-	emphTag          = "\\fI"
-	emphCloseTag     = "\\fP"
-	strongTag        = "\\fB"
-	strongCloseTag   = "\\fP"
-	breakTag         = "\n.br\n"
-	paraTag          = "\n.PP\n"
-	hruleTag         = "\n.ti 0\n\\l'\\n(.lu'\n"
-	linkTag          = "\n\\[la]"
-	linkCloseTag     = "\\[ra]"
-	codespanTag      = "\\fB"
-	codespanCloseTag = "\\fR"
-	codeTag          = "\n.EX\n"
-	codeCloseTag     = "\n.EE\n"
-	quoteTag         = "\n.PP\n.RS\n"
-	quoteCloseTag    = "\n.RE\n"
-	listTag          = "\n.RS\n"
-	listCloseTag     = "\n.RE\n"
-	dtTag            = "\n.TP\n"
-	dd2Tag           = "\n"
-	tableStart       = "\n.TS\nallbox;\n"
-	tableEnd         = ".TE\n"
-	tableCellStart   = "T{\n"
-	tableCellEnd     = "\nT}\n"
+	titleHeader       = ".TH "
+	topLevelHeader    = "\n\n.SH "
+	secondLevelHdr    = "\n.SH "
+	otherHeader       = "\n.SS "
+	crTag             = "\n"
+	emphTag           = "\\fI"
+	emphCloseTag      = "\\fP"
+	strongTag         = "\\fB"
+	strongCloseTag    = "\\fP"
+	breakTag          = "\n.br\n"
+	paraTag           = "\n.PP\n"
+	hruleTag          = "\n.ti 0\n\\l'\\n(.lu'\n"
+	linkTag           = "\n\\[la]"
+	linkCloseTag      = "\\[ra]"
+	codespanTag       = "\\fB"
+	codespanCloseTag  = "\\fR"
+	codeTag           = "\n.EX\n"
+	codeCloseTag      = ".EE\n" // Do not prepend a newline character since code blocks, by definition, include a newline already (or at least that's how blackfriday gives it to us).
+ quoteTag = "\n.PP\n.RS\n" + quoteCloseTag = "\n.RE\n" + listTag = "\n.RS\n" + listCloseTag = "\n.RE\n" + dtTag = "\n.TP\n" + dd2Tag = "\n" + tableStart = "\n.TS\nallbox;\n" + tableEnd = ".TE\n" + tableCellStart = "T{\n" + tableCellEnd = "\nT}\n" + tablePreprocessor = `'\" t` ) // NewRoffRenderer creates a new blackfriday Renderer for generating roff documents @@ -75,6 +77,16 @@ func (r *roffRenderer) GetExtensions() blackfriday.Extensions { // RenderHeader handles outputting the header at document start func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { + // We need to walk the tree to check if there are any tables. + // If there are, we need to enable the roff table preprocessor. + ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + if node.Type == blackfriday.Table { + out(w, tablePreprocessor+"\n") + return blackfriday.Terminate + } + return blackfriday.GoToNext + }) + // disable hyphenation out(w, ".nh\n") } @@ -322,6 +334,28 @@ func out(w io.Writer, output string) { } func escapeSpecialChars(w io.Writer, text []byte) { + scanner := bufio.NewScanner(bytes.NewReader(text)) + + // count the number of lines in the text + // we need to know this to avoid adding a newline after the last line + n := bytes.Count(text, []byte{'\n'}) + idx := 0 + + for scanner.Scan() { + dt := scanner.Bytes() + if idx < n { + idx++ + dt = append(dt, '\n') + } + escapeSpecialCharsLine(w, dt) + } + + if err := scanner.Err(); err != nil { + panic(err) + } +} + +func escapeSpecialCharsLine(w io.Writer, text []byte) { for i := 0; i < len(text); i++ { // escape initial apostrophe or period if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/.gitignore b/go-controller/vendor/github.com/fxamacker/cbor/v2/.gitignore new file mode 100644 index 0000000000..f1c181ec9c --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/.golangci.yml b/go-controller/vendor/github.com/fxamacker/cbor/v2/.golangci.yml new file mode 100644 index 0000000000..38cb9ae101 --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/.golangci.yml @@ -0,0 +1,104 @@ +# Do not delete linter settings. Linters like gocritic can be enabled on the command line. 
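+#
+# For example (illustrative invocation): golangci-lint run --enable gocritic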
+ +linters-settings: + depguard: + rules: + prevent_unmaintained_packages: + list-mode: strict + files: + - $all + - "!$test" + allow: + - $gostd + - github.com/x448/float16 + deny: + - pkg: io/ioutil + desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil" + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + ignore-tests: true + min-len: 2 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - commentedOutCode + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - ifElseChain + - octalLiteral + - paramTypeCombine + - whyNoLint + gofmt: + simplify: false + goimports: + local-prefixes: github.com/fxamacker/cbor + golint: + min-confidence: 0 + govet: + check-shadowing: true + lll: + line-length: 140 + maligned: + suggest-new: true + misspell: + locale: US + staticcheck: + checks: ["all"] + +linters: + disable-all: true + enable: + - asciicheck + - bidichk + - depguard + - errcheck + - exportloopref + - goconst + - gocritic + - gocyclo + - gofmt + - goimports + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - misspell + - nilerr + - revive + - staticcheck + - stylecheck + - typecheck + - unconvert + - unused + +issues: + # max-issues-per-linter default is 50. Set to 0 to disable limit. + max-issues-per-linter: 0 + # max-same-issues default is 3. Set to 0 to disable limit. + max-same-issues: 0 + + exclude-rules: + - path: decode.go + text: "string ` overflows ` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string `, ` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string `\\]\\)` has (\\d+) occurrences, make it a constant" + - path: valid.go + text: "string ` for type ` has (\\d+) occurrences, make it a constant" + - path: valid.go + text: "string `cbor: ` has (\\d+) occurrences, make it a constant" diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md b/go-controller/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..c794b2b0c6 --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md @@ -0,0 +1,133 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +faye.github@gmail.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. 
+ +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md b/go-controller/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md new file mode 100644 index 0000000000..de0965e12d --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md @@ -0,0 +1,41 @@ +# How to contribute + +You can contribute by using the library, opening issues, or opening pull requests. + +## Bug reports and security vulnerabilities + +Most issues are tracked publicly on [GitHub](https://github.com/fxamacker/cbor/issues). + +To report security vulnerabilities, please email faye.github@gmail.com and allow time for the problem to be resolved before disclosing it to the public. For more info, see [Security Policy](https://github.com/fxamacker/cbor#security-policy). + +Please do not send data that might contain personally identifiable information, even if you think you have permission. That type of support requires payment and a signed contract where I'm indemnified, held harmless, and defended by you for any data you send to me. + +## Pull requests + +Please [create an issue](https://github.com/fxamacker/cbor/issues/new/choose) before you begin work on a PR. The improvement may have already been considered, etc. + +Pull requests have signing requirements and must not be anonymous. Exceptions are usually made for docs and CI scripts. + +See the [Pull Request Template](https://github.com/fxamacker/cbor/blob/master/.github/pull_request_template.md) for details. + +Pull requests have a greater chance of being approved if: +- it does not reduce speed, increase memory use, reduce security, etc. for people not using the new option or feature. +- it has > 97% code coverage. + +## Describe your issue + +Clearly describe the issue: +* If it's a bug, please provide: **version of this library** and **Go** (`go version`), **unmodified error message**, and describe **how to reproduce it**. Also state **what you expected to happen** instead of the error. 
+* If you propose a change or addition, try to give an example of how the improved code could look or how to use it. +* If you found a compilation error, please confirm you're using a supported version of Go. If you are, then provide the output of `go version` first, followed by the complete error message. + +## Please don't + +Please don't send data containing personally identifiable information, even if you think you have permission. That type of support requires payment and a contract where I'm indemnified, held harmless, and defended for any data you send to me. + +Please don't send CBOR data larger than 1024 bytes by email. If you want to send crash-producing CBOR data > 1024 bytes by email, please get my permission before sending it to me. + +## Credits + +- This guide used nlohmann/json contribution guidelines for inspiration as suggested in issue #22. +- Special thanks to @lukseven for pointing out the contribution guidelines didn't mention signing requirements. diff --git a/go-controller/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE b/go-controller/vendor/github.com/fxamacker/cbor/v2/LICENSE similarity index 93% rename from go-controller/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE rename to go-controller/vendor/github.com/fxamacker/cbor/v2/LICENSE index 91b5cef30e..eaa8504921 100644 --- a/go-controller/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/LICENSE @@ -1,6 +1,6 @@ -The MIT License (MIT) +MIT License -Copyright (c) 2016 Yasuhiro Matsumoto +Copyright (c) 2019-present Faye Amacker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +SOFTWARE. \ No newline at end of file diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/README.md b/go-controller/vendor/github.com/fxamacker/cbor/v2/README.md new file mode 100644 index 0000000000..af0a79507e --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/README.md @@ -0,0 +1,691 @@ +# CBOR Codec in Go + + + +[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html). + +CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc.  CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades. + +`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). + +See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences.
`cbor.MarshalToBuffer()` and `UserBufferEncMode` accept a user-specified buffer. + +## fxamacker/cbor + +[![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci) +[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22) +[![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml) +[![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage) +[![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor) + +`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). + +Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc. + +Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc. + +<details>
Highlights

+ +__🚀  Speed__ + +Encoding and decoding are fast without using Go's `unsafe` package. Slower settings are opt-in. Default limits allow very fast and memory-efficient rejection of malformed CBOR data. + +__🔒  Security__ + +Decoder has configurable limits that defend against malicious inputs. Duplicate map key detection is supported. By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). + +Codec passed multiple confidential security assessments in 2022. No vulnerabilities found in subset of codec in a [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) prepared by NCC Group for Microsoft Corporation. + +__🗜️  Data Size__ + +Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. + +__🧩  Usability__ + +API is mostly the same as `encoding/json` plus interfaces that simplify concurrency for CBOR options. Encoding and decoding modes can be created at startup and reused by any goroutines. + +Presets include Core Deterministic Encoding, Preferred Serialization, CTAP2 Canonical CBOR, etc. + +__📆  Extensibility__ + +Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.html#section-7.1) (e.g. CBOR tags) and extensive settings. API has interfaces that allow users to create custom encoding and decoding without modifying this library. + +</p>


+ +
+ +### Secure Decoding with Configurable Settings + +`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data. + +By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). + +
Example decoding with encoding/gob 💥 fatal error (out of memory)

+ +```Go +// Example of encoding/gob having "fatal error: runtime: out of memory" +// while decoding 181 bytes. +package main +import ( + "bytes" + "encoding/gob" + "encoding/hex" + "fmt" +) + +// Example data is from https://github.com/golang/go/issues/24446 +// (shortened to 181 bytes). +const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + + "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + + "860001013001ff860001013001ffb80000001eff850401010e3030303030" + + "30303030303030303001ff3000010c0104000016ffb70201010830303030" + + "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + + "303030303030303030303030303030303030303030303030303030303030" + + "30" + +type X struct { + J *X + K map[string]int +} + +func main() { + raw, _ := hex.DecodeString(data) + decoder := gob.NewDecoder(bytes.NewReader(raw)) + + var x X + decoder.Decode(&x) // fatal error: runtime: out of memory + fmt.Println("Decoding finished.") +} +``` + +


+ +
+ +`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to +decode 10 bytes of malicious CBOR data to `[]byte` (with default settings): + +| Codec | Speed (ns/op) | Memory | Allocs | +| :---- | ------------: | -----: | -----: | +| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op | +| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op | + +
Benchmark details

+ +Latest comparison used: +- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933) +- go test -bench=. -benchmem -count=20 + +#### Prior comparisons + +| Codec | Speed (ns/op) | Memory | Allocs | +| :---- | ------------: | -----: | -----: | +| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | +| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | +| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | +| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | + +- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +- go1.19.6, linux/amd64, i5-13600K (DDR4) +- go test -bench=. -benchmem -count=20 + +


+ +
+ +### Smaller Encodings with Struct Tags + +Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. + +
Example encoding 3-level nested Go struct to 1 byte CBOR

+ +https://go.dev/play/p/YxwvfPdFQG2 + +```Go +// Example encoding nested struct (with omitempty tag) +// - encoding/json: 18 byte JSON +// - fxamacker/cbor: 1 byte CBOR +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +type GrandChild struct { + Quux int `json:",omitempty"` +} + +type Child struct { + Baz int `json:",omitempty"` + Qux GrandChild `json:",omitempty"` +} + +type Parent struct { + Foo Child `json:",omitempty"` + Bar int `json:",omitempty"` +} + +func cb() { + results, _ := cbor.Marshal(Parent{}) + fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) + + text, _ := cbor.Diagnose(results) // Diagnostic Notation + fmt.Println("DN: " + text) +} + +func js() { + results, _ := json.Marshal(Parent{}) + fmt.Println("hex(JSON): " + hex.EncodeToString(results)) + + text := string(results) // JSON + fmt.Println("JSON: " + text) +} + +func main() { + cb() + fmt.Println("-------------") + js() +} +``` + +Output (DN is Diagnostic Notation): +``` +hex(CBOR): a0 +DN: {} +------------- +hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +JSON: {"Foo":{"Qux":{}}} +``` + +


+ +
+ +Example using different struct tags together: + +![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") + +API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options. + +## Quick Start + +__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`. + +### Key Points + +This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742). + +- __CBOR data item__ is a single piece of CBOR data and its structure may contain 0 or more nested data items. +- __CBOR sequence__ is a concatenation of 0 or more encoded CBOR data items. + +Configurable limits and options can be used to balance trade-offs. + +- Encoding and decoding modes are created from options (settings). +- Modes can be created at startup and reused. +- Modes are safe for concurrent use. + +### Default Mode + +Package level functions only use this library's default settings. +They provide the "default mode" of encoding and decoding. + +```go +// API matches encoding/json for Marshal, Unmarshal, Encode, Decode, etc. +b, err = cbor.Marshal(v) // encode v to []byte b +err = cbor.Unmarshal(b, &v) // decode []byte b to v +decoder = cbor.NewDecoder(r) // create decoder with io.Reader r +err = decoder.Decode(&v) // decode a CBOR data item to v + +// v2.7.0 added MarshalToBuffer() and UserBufferEncMode interface. +err = cbor.MarshalToBuffer(v, b) // encode v to b instead of using built-in buf pool. + +// v2.5.0 added new functions that return remaining bytes. + +// UnmarshalFirst decodes first CBOR data item and returns remaining bytes. +rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v + +// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes. +text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text + +// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes, +// but new funcs UnmarshalFirst and DiagnoseFirst do not. +``` + +__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc. + +- Different CBOR libraries may use different default settings. +- CBOR-based formats or protocols usually require specific settings. + +For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. + +### Presets + +Presets can be used as-is or as a starting point for custom settings. + +```go +// EncOptions is a struct of encoder settings. +func CoreDetEncOptions() EncOptions // RFC 8949 Core Deterministic Encoding +func PreferredUnsortedEncOptions() EncOptions // RFC 8949 Preferred Serialization +func CanonicalEncOptions() EncOptions // RFC 7049 Canonical CBOR +func CTAP2EncOptions() EncOptions // FIDO2 CTAP2 Canonical CBOR +``` + +Presets are used to create custom modes. + +### Custom Modes + +Modes are created from settings. Once created, modes have immutable settings. + +💡 Create the mode at startup and reuse it. It is safe for concurrent use. + +```Go +// Create encoding mode. +opts := cbor.CoreDetEncOptions() // use preset options as a starting point +opts.Time = cbor.TimeUnix // change any settings if needed +em, err := opts.EncMode() // create an immutable encoding mode + +// Reuse the encoding mode. It is safe for concurrent use. + +// API matches encoding/json. 
+b, err := em.Marshal(v) // encode v to []byte b +encoder := em.NewEncoder(w) // create encoder with io.Writer w +err := encoder.Encode(v) // encode v to io.Writer w +``` + +Default mode and custom modes automatically apply struct tags. + +### User Specified Buffer for Encoding (v2.7.0) + +`UserBufferEncMode` interface extends `EncMode` interface to add `MarshalToBuffer()`. It accepts a user-specified buffer instead of using built-in buffer pool. + +```Go +em, err := myEncOptions.UserBufferEncMode() // create UserBufferEncMode mode + +var buf bytes.Buffer +err = em.MarshalToBuffer(v, &buf) // encode v to provided buf +``` + +### Struct Tags + +Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. + +
Example encoding 3-level nested Go struct to 1 byte CBOR

+ +https://go.dev/play/p/YxwvfPdFQG2 + +```Go +// Example encoding nested struct (with omitempty tag) +// - encoding/json: 18 byte JSON +// - fxamacker/cbor: 1 byte CBOR +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +type GrandChild struct { + Quux int `json:",omitempty"` +} + +type Child struct { + Baz int `json:",omitempty"` + Qux GrandChild `json:",omitempty"` +} + +type Parent struct { + Foo Child `json:",omitempty"` + Bar int `json:",omitempty"` +} + +func cb() { + results, _ := cbor.Marshal(Parent{}) + fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) + + text, _ := cbor.Diagnose(results) // Diagnostic Notation + fmt.Println("DN: " + text) +} + +func js() { + results, _ := json.Marshal(Parent{}) + fmt.Println("hex(JSON): " + hex.EncodeToString(results)) + + text := string(results) // JSON + fmt.Println("JSON: " + text) +} + +func main() { + cb() + fmt.Println("-------------") + js() +} +``` + +Output (DN is Diagnostic Notation): +``` +hex(CBOR): a0 +DN: {} +------------- +hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +JSON: {"Foo":{"Qux":{}}} +``` + +


+ +
+ +
Example using several struct tags

+ +![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") + +

+ +Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. + +### CBOR Tags + +CBOR tags are specified in a `TagSet`. + +Custom modes can be created with a `TagSet` to handle CBOR tags. + +```go +em, err := opts.EncMode() // no CBOR tags +em, err := opts.EncModeWithTags(ts) // immutable CBOR tags +em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags +``` + +`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`. + +
Example using TagSet and TagOptions

+ +```go +// Use signedCWT struct defined in "Decoding CWT" example. + +// Create TagSet (safe for concurrency). +tags := cbor.NewTagSet() +// Register tag COSE_Sign1 18 with signedCWT type. +tags.Add( + cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired}, + reflect.TypeOf(signedCWT{}), + 18) + +// Create DecMode with immutable tags. +dm, _ := cbor.DecOptions{}.DecModeWithTags(tags) + +// Unmarshal to signedCWT with tag support. +var v signedCWT +if err := dm.Unmarshal(data, &v); err != nil { + return err +} + +// Create EncMode with immutable tags. +em, _ := cbor.EncOptions{}.EncModeWithTags(tags) + +// Marshal signedCWT with tag number using the tag-aware EncMode. +if data, err := em.Marshal(v); err != nil { + return err +} +``` + +</details>

+ +### Functions and Interfaces + +
Functions and interfaces at a glance

+ +Common functions with the same API as `encoding/json`: +- `Marshal`, `Unmarshal` +- `NewEncoder`, `(*Encoder).Encode` +- `NewDecoder`, `(*Decoder).Decode` + +NOTE: `Unmarshal` will return `ExtraneousDataError` if there are remaining bytes +because RFC 8949 treats a CBOR data item with remaining bytes as malformed. +- 💡 Use `UnmarshalFirst` to decode the first CBOR data item and return any remaining bytes. + +Other useful functions: +- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data. +- `UnmarshalFirst` decodes the first CBOR data item and returns any remaining bytes. +- `Wellformed` returns nil if the CBOR data item is well-formed. + +Interfaces identical or comparable to Go `encoding` packages include: +`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`. + +The `RawMessage` type can be used to delay CBOR decoding or precompute CBOR encoding. + +</details>

+ +### Security Tips + +🔒 Use Go's `io.LimitReader` to limit size when decoding very large or indefinite size data. + +Default limits may need to be increased for systems handling very large data (e.g. blockchains). + +`DecOptions` can be used to modify default limits for `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels`. + +## Status + +v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. + +For more details, see [release notes](https://github.com/fxamacker/cbor/releases). + +### Prior Release + +[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings. + +v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). + +__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading. + +See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) for list of new features, improvements, and bug fixes. + +See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc. + + + +## Who uses fxamacker/cbor + +`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper Labs, EdgeX Foundry, F5, FIDO Alliance, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Matrix.org, Microsoft, Mozilla, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others. + +`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope. + +## Standards + +`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). + +Notable CBOR features include: + +| CBOR Feature | Description | +| :--- | :--- | +| CBOR tags | API supports built-in and user-defined tags. | +| Preferred serialization | Integers encode to fewest bytes. Optional float64 → float32 → float16. | +| Map key sorting | Unsorted, length-first (Canonical CBOR), and bytewise-lexicographic (CTAP2). 
| +| Duplicate map keys | Always forbid for encoding and option to allow/forbid for decoding. | +| Indefinite length data | Option to allow/forbid for encoding and decoding. | +| Well-formedness | Always checked and enforced. | +| Basic validity checks | Optionally check UTF-8 validity and duplicate map keys. | +| Security considerations | Prevent integer overflow and resource exhaustion (RFC 8949 Section 10). | + +Known limitations are noted in the [Limitations section](#limitations). + +Go nil values for slices, maps, pointers, etc. are encoded as CBOR null. Empty slices, maps, etc. are encoded as empty CBOR arrays and maps. + +Decoder checks for all required well-formedness errors, including all "subkinds" of syntax errors and too little data. + +After well-formedness is verified, basic validity errors are handled as follows: + +* Invalid UTF-8 string: Decoder has option to check and return invalid UTF-8 string error. This check is enabled by default. +* Duplicate keys in a map: Decoder has options to ignore or enforce rejection of duplicate map keys. + +When decoding well-formed CBOR arrays and maps, decoder saves the first error it encounters and continues with the next item. Options to handle this differently may be added in the future. + +By default, decoder treats time values of floating-point NaN and Infinity as if they are CBOR Null or CBOR Undefined. + +__Click to expand topic:__ + +
+ Duplicate Map Keys

+ +This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct. + +`DupMapKeyQuiet` turns off detection of duplicate map keys. It tries to use a "keep fastest" method by choosing either "keep first" or "keep last" depending on the Go data type. + +`DupMapKeyEnforcedAPF` enforces detection and rejection of duplicate map keys. Decoding stops immediately and returns `DupMapKeyError` when the first duplicate key is detected. The error includes the duplicate map key and the index number. + +APF suffix means "Allow Partial Fill" so the destination map or struct can contain some decoded values at the time of error. It is the caller's responsibility to respond to the `DupMapKeyError` by discarding the partially filled result if that's required by their protocol. + +</details>

+ +
+ Tag Validity

+ +This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799): + +* Inadmissible type for tag content +* Inadmissible value for tag content + +Unknown tag data items (not tag number 0, 1, 2, 3, or 55799) are handled in two ways: + +* When decoding into an empty interface, unknown tag data item will be decoded into `cbor.Tag` data type, which contains tag number and tag content. The tag content will be decoded into the default Go data type for the CBOR data type. +* When decoding into other Go types, unknown tag data item is decoded into the specified Go type. If Go type is registered with a tag number, the tag number can optionally be verified. + +Decoder also has an option to forbid tag data items (treat any tag data item as error) which is specified by protocols such as CTAP2 Canonical CBOR. + +For more information, see [decoding options](#decoding-options-1) and [tag options](#tag-options). + +

+ +## Limitations + +If any of these limitations prevent you from using this library, please open an issue along with a link to your project. + +* CBOR `Undefined` (0xf7) value decodes to Go's `nil` value. CBOR `Null` (0xf6) more closely matches Go's `nil`. +* CBOR map keys with data types not supported by Go for map keys are ignored and an error is returned after continuing to decode remaining items. +* When decoding registered CBOR tag data to interface type, decoder creates a pointer to registered Go type matching CBOR tag number. Requiring a pointer for this is a Go limitation. + +## Fuzzing and Code Coverage + +__Code coverage__ is always 95% or higher (with `go test -cover`) when tagging a release. + +__Coverage-guided fuzzing__ must pass billions of execs before tagging a release. Fuzzing is done using nonpublic code which may eventually get merged into this project. Until then, reports like OpenSSF Scorecard can't detect fuzz tests being used by this project. + +<hr>
+ +## Versions and API Changes +This project uses [Semantic Versioning](https://semver.org), so the API is always backwards compatible unless the major version number changes. + +These functions have signatures identical to encoding/json and their API will continue to match `encoding/json` even after major new releases: +`Marshal`, `Unmarshal`, `NewEncoder`, `NewDecoder`, `(*Encoder).Encode`, and `(*Decoder).Decode`. + +Exclusions from SemVer: +- Newly added API documented as "subject to change". +- Newly added API in the master branch that has never been tagged in non-beta release. +- If function parameters are unchanged, bug fixes that change behavior (e.g. return error for edge case was missed in prior version). We try to highlight these in the release notes and add extended beta period. E.g. [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). + +This project avoids breaking changes to behavior of encoding and decoding functions unless required to improve conformance with supported RFCs (e.g. RFC 8949, RFC 8742, etc.) Visible changes that don't improve conformance to standards are typically made available as new opt-in settings or new functions. + +## Code of Conduct + +This project has adopted the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). Contact [faye.github@gmail.com](mailto:faye.github@gmail.com) with any questions or comments. + +## Contributing + +Please open an issue before beginning work on a PR. The improvement may have already been considered, etc. + +For more info, see [How to Contribute](CONTRIBUTING.md). + +## Security Policy + +Security fixes are provided for the latest released version of fxamacker/cbor. + +For the full text of the Security Policy, see [SECURITY.md](SECURITY.md). + +## Acknowledgements + +Many thanks to all the contributors on this project! + +I'm especially grateful to Bastian Müller and Dieter Shirley for suggesting and collaborating on CBOR stream mode, and much more. + +I'm very grateful to Stefan Tatschner, Yawning Angel, Jernej Kos, x448, ZenGround0, and Jakob Borg for their contributions or support in the very early days. + +Big thanks to Ben Luddy for his contributions in v2.6.0 and v2.7.0. + +This library clearly wouldn't be possible without Carsten Bormann authoring CBOR RFCs. + +Special thanks to Laurence Lundblade and Jeffrey Yasskin for their help on IETF mailing list or at [7049bis](https://github.com/cbor-wg/CBORbis). + +Huge thanks to The Go Authors for creating a fun and practical programming language with batteries included! + +This library uses `x448/float16` which used to be included. As a standalone package, `x448/float16` is useful to other projects as well. + +## License + +Copyright © 2019-2024 [Faye Amacker](https://github.com/fxamacker). + +fxamacker/cbor is licensed under the MIT License. See [LICENSE](LICENSE) for the full license text. + +
diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/SECURITY.md b/go-controller/vendor/github.com/fxamacker/cbor/v2/SECURITY.md new file mode 100644 index 0000000000..9c05146d16 --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +Security fixes are provided for the latest released version of fxamacker/cbor. + +If the security vulnerability is already known to the public, then you can open an issue as a bug report. + +To report security vulnerabilities not yet known to the public, please email faye.github@gmail.com and allow time for the problem to be resolved before reporting it to the public. diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/bytestring.go b/go-controller/vendor/github.com/fxamacker/cbor/v2/bytestring.go new file mode 100644 index 0000000000..823bff12ce --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/bytestring.go @@ -0,0 +1,63 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "errors" +) + +// ByteString represents CBOR byte string (major type 2). ByteString can be used +// when using a Go []byte is not possible or convenient. For example, Go doesn't +// allow []byte as map key, so ByteString can be used to support data formats +// having CBOR map with byte string keys. ByteString can also be used to +// encode invalid UTF-8 string as CBOR byte string. +// See DecOption.MapKeyByteStringMode for more details. +type ByteString string + +// Bytes returns bytes representing ByteString. +func (bs ByteString) Bytes() []byte { + return []byte(bs) +} + +// MarshalCBOR encodes ByteString as CBOR byte string (major type 2). +func (bs ByteString) MarshalCBOR() ([]byte, error) { + e := getEncodeBuffer() + defer putEncodeBuffer(e) + + // Encode length + encodeHead(e, byte(cborTypeByteString), uint64(len(bs))) + + // Encode data + buf := make([]byte, e.Len()+len(bs)) + n := copy(buf, e.Bytes()) + copy(buf[n:], bs) + + return buf, nil +} + +// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString. +// Decoding CBOR null and CBOR undefined sets ByteString to be empty. +func (bs *ByteString) UnmarshalCBOR(data []byte) error { + if bs == nil { + return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer") + } + + // Decoding CBOR null and CBOR undefined to ByteString resets data. + // This behavior is similar to decoding CBOR null and CBOR undefined to []byte. + if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) { + *bs = "" + return nil + } + + d := decoder{data: data, dm: defaultDecMode} + + // Check if CBOR data type is byte string + if typ := d.nextCBORType(); typ != cborTypeByteString { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeByteString.String()} + } + + b, _ := d.parseByteString() + *bs = ByteString(b) + return nil +} diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/cache.go b/go-controller/vendor/github.com/fxamacker/cbor/v2/cache.go new file mode 100644 index 0000000000..ea0f39e24f --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/cache.go @@ -0,0 +1,363 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +package cbor + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +type encodeFuncs struct { + ef encodeFunc + ief isEmptyFunc +} + +var ( + decodingStructTypeCache sync.Map // map[reflect.Type]*decodingStructType + encodingStructTypeCache sync.Map // map[reflect.Type]*encodingStructType + encodeFuncCache sync.Map // map[reflect.Type]encodeFuncs + typeInfoCache sync.Map // map[reflect.Type]*typeInfo +) + +type specialType int + +const ( + specialTypeNone specialType = iota + specialTypeUnmarshalerIface + specialTypeEmptyIface + specialTypeIface + specialTypeTag + specialTypeTime +) + +type typeInfo struct { + elemTypeInfo *typeInfo + keyTypeInfo *typeInfo + typ reflect.Type + kind reflect.Kind + nonPtrType reflect.Type + nonPtrKind reflect.Kind + spclType specialType +} + +func newTypeInfo(t reflect.Type) *typeInfo { + tInfo := typeInfo{typ: t, kind: t.Kind()} + + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + + k := t.Kind() + + tInfo.nonPtrType = t + tInfo.nonPtrKind = k + + if k == reflect.Interface { + if t.NumMethod() == 0 { + tInfo.spclType = specialTypeEmptyIface + } else { + tInfo.spclType = specialTypeIface + } + } else if t == typeTag { + tInfo.spclType = specialTypeTag + } else if t == typeTime { + tInfo.spclType = specialTypeTime + } else if reflect.PtrTo(t).Implements(typeUnmarshaler) { + tInfo.spclType = specialTypeUnmarshalerIface + } + + switch k { + case reflect.Array, reflect.Slice: + tInfo.elemTypeInfo = getTypeInfo(t.Elem()) + case reflect.Map: + tInfo.keyTypeInfo = getTypeInfo(t.Key()) + tInfo.elemTypeInfo = getTypeInfo(t.Elem()) + } + + return &tInfo +} + +type decodingStructType struct { + fields fields + fieldIndicesByName map[string]int + err error + toArray bool +} + +// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead, +// here's a very basic implementation of an aggregated error. 
+type multierror []error + +func (m multierror) Error() string { + var sb strings.Builder + for i, err := range m { + sb.WriteString(err.Error()) + if i < len(m)-1 { + sb.WriteString(", ") + } + } + return sb.String() +} + +func getDecodingStructType(t reflect.Type) *decodingStructType { + if v, _ := decodingStructTypeCache.Load(t); v != nil { + return v.(*decodingStructType) + } + + flds, structOptions := getFields(t) + + toArray := hasToArrayOption(structOptions) + + var errs []error + for i := 0; i < len(flds); i++ { + if flds[i].keyAsInt { + nameAsInt, numErr := strconv.Atoi(flds[i].name) + if numErr != nil { + errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")")) + break + } + flds[i].nameAsInt = int64(nameAsInt) + } + + flds[i].typInfo = getTypeInfo(flds[i].typ) + } + + fieldIndicesByName := make(map[string]int, len(flds)) + for i, fld := range flds { + if _, ok := fieldIndicesByName[fld.name]; ok { + errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name)) + continue + } + fieldIndicesByName[fld.name] = i + } + + var err error + { + var multi multierror + for _, each := range errs { + if each != nil { + multi = append(multi, each) + } + } + if len(multi) == 1 { + err = multi[0] + } else if len(multi) > 1 { + err = multi + } + } + + structType := &decodingStructType{ + fields: flds, + fieldIndicesByName: fieldIndicesByName, + err: err, + toArray: toArray, + } + decodingStructTypeCache.Store(t, structType) + return structType +} + +type encodingStructType struct { + fields fields + bytewiseFields fields + lengthFirstFields fields + omitEmptyFieldsIdx []int + err error + toArray bool +} + +func (st *encodingStructType) getFields(em *encMode) fields { + switch em.sort { + case SortNone, SortFastShuffle: + return st.fields + case SortLengthFirst: + return st.lengthFirstFields + default: + return st.bytewiseFields + } +} + +type bytewiseFieldSorter struct { + fields fields +} + +func (x *bytewiseFieldSorter) Len() int { + return len(x.fields) +} + +func (x *bytewiseFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *bytewiseFieldSorter) Less(i, j int) bool { + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 +} + +type lengthFirstFieldSorter struct { + fields fields +} + +func (x *lengthFirstFieldSorter) Len() int { + return len(x.fields) +} + +func (x *lengthFirstFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *lengthFirstFieldSorter) Less(i, j int) bool { + if len(x.fields[i].cborName) != len(x.fields[j].cborName) { + return len(x.fields[i].cborName) < len(x.fields[j].cborName) + } + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 +} + +func getEncodingStructType(t reflect.Type) (*encodingStructType, error) { + if v, _ := encodingStructTypeCache.Load(t); v != nil { + structType := v.(*encodingStructType) + return structType, structType.err + } + + flds, structOptions := getFields(t) + + if hasToArrayOption(structOptions) { + return getEncodingStructToArrayType(t, flds) + } + + var err error + var hasKeyAsInt bool + var hasKeyAsStr bool + var omitEmptyIdx []int + e := getEncodeBuffer() + for i := 0; i < len(flds); i++ { + // Get field's encodeFunc + flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ) + if flds[i].ef == nil { + err = &UnsupportedTypeError{t} + break + } + + // Encode field name + if flds[i].keyAsInt { + nameAsInt, numErr := 
strconv.Atoi(flds[i].name) + if numErr != nil { + err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")") + break + } + flds[i].nameAsInt = int64(nameAsInt) + if nameAsInt >= 0 { + encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt)) + } else { + n := nameAsInt*(-1) - 1 + encodeHead(e, byte(cborTypeNegativeInt), uint64(n)) + } + flds[i].cborName = make([]byte, e.Len()) + copy(flds[i].cborName, e.Bytes()) + e.Reset() + + hasKeyAsInt = true + } else { + encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name))) + flds[i].cborName = make([]byte, e.Len()+len(flds[i].name)) + n := copy(flds[i].cborName, e.Bytes()) + copy(flds[i].cborName[n:], flds[i].name) + e.Reset() + + // If cborName contains a text string, then cborNameByteString contains a + // string that has the byte string major type but is otherwise identical to + // cborName. + flds[i].cborNameByteString = make([]byte, len(flds[i].cborName)) + copy(flds[i].cborNameByteString, flds[i].cborName) + // Reset encoded CBOR type to byte string, preserving the "additional + // information" bits: + flds[i].cborNameByteString[0] = byte(cborTypeByteString) | + getAdditionalInformation(flds[i].cborNameByteString[0]) + + hasKeyAsStr = true + } + + // Check if field can be omitted when empty + if flds[i].omitEmpty { + omitEmptyIdx = append(omitEmptyIdx, i) + } + } + putEncodeBuffer(e) + + if err != nil { + structType := &encodingStructType{err: err} + encodingStructTypeCache.Store(t, structType) + return structType, structType.err + } + + // Sort fields by canonical order + bytewiseFields := make(fields, len(flds)) + copy(bytewiseFields, flds) + sort.Sort(&bytewiseFieldSorter{bytewiseFields}) + + lengthFirstFields := bytewiseFields + if hasKeyAsInt && hasKeyAsStr { + lengthFirstFields = make(fields, len(flds)) + copy(lengthFirstFields, flds) + sort.Sort(&lengthFirstFieldSorter{lengthFirstFields}) + } + + structType := &encodingStructType{ + fields: flds, + bytewiseFields: bytewiseFields, + lengthFirstFields: lengthFirstFields, + omitEmptyFieldsIdx: omitEmptyIdx, + } + + encodingStructTypeCache.Store(t, structType) + return structType, structType.err +} + +func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) { + for i := 0; i < len(flds); i++ { + // Get field's encodeFunc + flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ) + if flds[i].ef == nil { + structType := &encodingStructType{err: &UnsupportedTypeError{t}} + encodingStructTypeCache.Store(t, structType) + return structType, structType.err + } + } + + structType := &encodingStructType{ + fields: flds, + toArray: true, + } + encodingStructTypeCache.Store(t, structType) + return structType, structType.err +} + +func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) { + if v, _ := encodeFuncCache.Load(t); v != nil { + fs := v.(encodeFuncs) + return fs.ef, fs.ief + } + ef, ief := getEncodeFuncInternal(t) + encodeFuncCache.Store(t, encodeFuncs{ef, ief}) + return ef, ief +} + +func getTypeInfo(t reflect.Type) *typeInfo { + if v, _ := typeInfoCache.Load(t); v != nil { + return v.(*typeInfo) + } + tInfo := newTypeInfo(t) + typeInfoCache.Store(t, tInfo) + return tInfo +} + +func hasToArrayOption(tag string) bool { + s := ",toarray" + idx := strings.Index(tag, s) + return idx >= 0 && (len(tag) == idx+len(s) || tag[idx+len(s)] == ',') +} diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/common.go b/go-controller/vendor/github.com/fxamacker/cbor/v2/common.go new file mode 100644 
index 0000000000..ec038a49ec --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/common.go @@ -0,0 +1,182 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "fmt" + "strconv" +) + +type cborType uint8 + +const ( + cborTypePositiveInt cborType = 0x00 + cborTypeNegativeInt cborType = 0x20 + cborTypeByteString cborType = 0x40 + cborTypeTextString cborType = 0x60 + cborTypeArray cborType = 0x80 + cborTypeMap cborType = 0xa0 + cborTypeTag cborType = 0xc0 + cborTypePrimitives cborType = 0xe0 +) + +func (t cborType) String() string { + switch t { + case cborTypePositiveInt: + return "positive integer" + case cborTypeNegativeInt: + return "negative integer" + case cborTypeByteString: + return "byte string" + case cborTypeTextString: + return "UTF-8 text string" + case cborTypeArray: + return "array" + case cborTypeMap: + return "map" + case cborTypeTag: + return "tag" + case cborTypePrimitives: + return "primitives" + default: + return "Invalid type " + strconv.Itoa(int(t)) + } +} + +type additionalInformation uint8 + +const ( + maxAdditionalInformationWithoutArgument = 23 + additionalInformationWith1ByteArgument = 24 + additionalInformationWith2ByteArgument = 25 + additionalInformationWith4ByteArgument = 26 + additionalInformationWith8ByteArgument = 27 + + // For major type 7. + additionalInformationAsFalse = 20 + additionalInformationAsTrue = 21 + additionalInformationAsNull = 22 + additionalInformationAsUndefined = 23 + additionalInformationAsFloat16 = 25 + additionalInformationAsFloat32 = 26 + additionalInformationAsFloat64 = 27 + + // For major type 2, 3, 4, 5. + additionalInformationAsIndefiniteLengthFlag = 31 +) + +const ( + maxSimpleValueInAdditionalInformation = 23 + minSimpleValueIn1ByteArgument = 32 +) + +func (ai additionalInformation) isIndefiniteLength() bool { + return ai == additionalInformationAsIndefiniteLengthFlag +} + +const ( + // From RFC 8949 Section 3: + // "The initial byte of each encoded data item contains both information about the major type + // (the high-order 3 bits, described in Section 3.1) and additional information + // (the low-order 5 bits)." + + // typeMask is used to extract major type in initial byte of encoded data item. + typeMask = 0xe0 + + // additionalInformationMask is used to extract additional information in initial byte of encoded data item. 
+ additionalInformationMask = 0x1f +) + +func getType(raw byte) cborType { + return cborType(raw & typeMask) +} + +func getAdditionalInformation(raw byte) byte { + return raw & additionalInformationMask +} + +func isBreakFlag(raw byte) bool { + return raw == cborBreakFlag +} + +func parseInitialByte(b byte) (t cborType, ai byte) { + return getType(b), getAdditionalInformation(b) +} + +const ( + tagNumRFC3339Time = 0 + tagNumEpochTime = 1 + tagNumUnsignedBignum = 2 + tagNumNegativeBignum = 3 + tagNumExpectedLaterEncodingBase64URL = 21 + tagNumExpectedLaterEncodingBase64 = 22 + tagNumExpectedLaterEncodingBase16 = 23 + tagNumSelfDescribedCBOR = 55799 +) + +const ( + cborBreakFlag = byte(0xff) + cborByteStringWithIndefiniteLengthHead = byte(0x5f) + cborTextStringWithIndefiniteLengthHead = byte(0x7f) + cborArrayWithIndefiniteLengthHead = byte(0x9f) + cborMapWithIndefiniteLengthHead = byte(0xbf) +) + +var ( + cborFalse = []byte{0xf4} + cborTrue = []byte{0xf5} + cborNil = []byte{0xf6} + cborNaN = []byte{0xf9, 0x7e, 0x00} + cborPositiveInfinity = []byte{0xf9, 0x7c, 0x00} + cborNegativeInfinity = []byte{0xf9, 0xfc, 0x00} +) + +// validBuiltinTag checks that supported built-in tag numbers are followed by expected content types. +func validBuiltinTag(tagNum uint64, contentHead byte) error { + t := getType(contentHead) + switch tagNum { + case tagNumRFC3339Time: + // Tag content (date/time text string in RFC 3339 format) must be string type. + if t != cborTypeTextString { + return newInadmissibleTagContentTypeError( + tagNumRFC3339Time, + "text string", + t.String()) + } + return nil + + case tagNumEpochTime: + // Tag content (epoch date/time) must be uint, int, or float type. + if t != cborTypePositiveInt && t != cborTypeNegativeInt && (contentHead < 0xf9 || contentHead > 0xfb) { + return newInadmissibleTagContentTypeError( + tagNumEpochTime, + "integer or floating-point number", + t.String()) + } + return nil + + case tagNumUnsignedBignum, tagNumNegativeBignum: + // Tag content (bignum) must be byte type. + if t != cborTypeByteString { + return newInadmissibleTagContentTypeErrorf( + fmt.Sprintf( + "tag number %d or %d must be followed by byte string, got %s", + tagNumUnsignedBignum, + tagNumNegativeBignum, + t.String(), + )) + } + return nil + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // From RFC 8949 3.4.5.2: + // The data item tagged can be a byte string or any other data item. In the latter + // case, the tag applies to all of the byte string data items contained in the data + // item, except for those contained in a nested data item tagged with an expected + // conversion. + return nil + } + + return nil +} diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/decode.go b/go-controller/vendor/github.com/fxamacker/cbor/v2/decode.go new file mode 100644 index 0000000000..85842ac736 --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/decode.go @@ -0,0 +1,3187 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "encoding" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "math/big" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/x448/float16" +) + +// Unmarshal parses the CBOR-encoded data into the value pointed to by v +// using default decoding options. 
If v is nil, not a pointer, or +// a nil pointer, Unmarshal returns an error. +// +// To unmarshal CBOR into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalCBOR method with a valid +// CBOR value. +// +// To unmarshal CBOR byte string into a value implementing the +// encoding.BinaryUnmarshaler interface, Unmarshal calls that value's +// UnmarshalBinary method with decoded CBOR byte string. +// +// To unmarshal CBOR into a pointer, Unmarshal sets the pointer to nil +// if CBOR data is null (0xf6) or undefined (0xf7). Otherwise, Unmarshal +// unmarshals CBOR into the value pointed to by the pointer. If the +// pointer is nil, Unmarshal creates a new value for it to point to. +// +// To unmarshal CBOR into an empty interface value, Unmarshal uses the +// following rules: +// +// CBOR booleans decode to bool. +// CBOR positive integers decode to uint64. +// CBOR negative integers decode to int64 (big.Int if value overflows). +// CBOR floating points decode to float64. +// CBOR byte strings decode to []byte. +// CBOR text strings decode to string. +// CBOR arrays decode to []interface{}. +// CBOR maps decode to map[interface{}]interface{}. +// CBOR null and undefined values decode to nil. +// CBOR times (tag 0 and 1) decode to time.Time. +// CBOR bignums (tag 2 and 3) decode to big.Int. +// CBOR tags with an unrecognized number decode to cbor.Tag. +// +// To unmarshal a CBOR array into a slice, Unmarshal allocates a new slice +// if the CBOR array is empty or slice capacity is less than CBOR array length. +// Otherwise Unmarshal overwrites existing elements, and sets slice length +// to CBOR array length. +// +// To unmarshal a CBOR array into a Go array, Unmarshal decodes CBOR array +// elements into Go array elements. If the Go array is smaller than the +// CBOR array, the extra CBOR array elements are discarded. If the CBOR +// array is smaller than the Go array, the extra Go array elements are +// set to zero values. +// +// To unmarshal a CBOR array into a struct, struct must have a special field "_" +// with struct tag `cbor:",toarray"`. Go array elements are decoded into struct +// fields. Any "omitempty" struct field tag option is ignored in this case. +// +// To unmarshal a CBOR map into a map, Unmarshal allocates a new map only if the +// map is nil. Otherwise Unmarshal reuses the existing map and keeps existing +// entries. Unmarshal stores key-value pairs from the CBOR map into Go map. +// See DecOptions.DupMapKey to enable duplicate map key detection. +// +// To unmarshal a CBOR map into a struct, Unmarshal matches CBOR map keys to the +// keys in the following priority: +// +// 1. "cbor" key in struct field tag, +// 2. "json" key in struct field tag, +// 3. struct field name. +// +// Unmarshal tries an exact match for field name, then a case-insensitive match. +// Map key-value pairs without corresponding struct fields are ignored. See +// DecOptions.ExtraReturnErrors to return error at unknown field. +// +// To unmarshal a CBOR text string into a time.Time value, Unmarshal parses text +// string formatted in RFC3339. To unmarshal a CBOR integer/float into a +// time.Time value, Unmarshal creates a Unix time with integer/float as seconds +// and fractional seconds since January 1, 1970 UTC. As a special case, Infinite +// and NaN float values decode to time.Time's zero value. +// +// To unmarshal CBOR null (0xf6) and undefined (0xf7) values into a +// slice/map/pointer, Unmarshal sets Go value to nil.
Because null is often +// used to mean "not present", unmarshalling CBOR null and undefined value +// into any other Go type has no effect and returns no error. +// +// Unmarshal supports CBOR tag 55799 (self-describe CBOR), tag 0 and 1 (time), +// and tag 2 and 3 (bignum). +// +// Unmarshal returns ExtraneousDataError error (without decoding into v) +// if there are any remaining bytes following the first valid CBOR data item. +// See UnmarshalFirst, if you want to unmarshal only the first +// CBOR data item without ExtraneousDataError caused by remaining bytes. +func Unmarshal(data []byte, v interface{}) error { + return defaultDecMode.Unmarshal(data, v) +} + +// UnmarshalFirst parses the first CBOR data item into the value pointed to by v +// using default decoding options. Any remaining bytes are returned in rest. +// +// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. +// +// See the documentation for Unmarshal for details. +func UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) { + return defaultDecMode.UnmarshalFirst(data, v) +} + +// Valid checks whether data is a well-formed encoded CBOR data item and +// that it complies with default restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +// +// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) +// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". +// +// Deprecated: Valid is kept for compatibility and should not be used. +// Use Wellformed instead because it has a more appropriate name. +func Valid(data []byte) error { + return defaultDecMode.Valid(data) +} + +// Wellformed checks whether data is a well-formed encoded CBOR data item and +// that it complies with default restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +func Wellformed(data []byte) error { + return defaultDecMode.Wellformed(data) +} + +// Unmarshaler is the interface implemented by types that wish to unmarshal +// CBOR data themselves. The input is a valid CBOR value. UnmarshalCBOR +// must copy the CBOR data if it needs to use it after returning. +type Unmarshaler interface { + UnmarshalCBOR([]byte) error +} + +// InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +type InvalidUnmarshalError struct { + s string +} + +func (e *InvalidUnmarshalError) Error() string { + return e.s +} + +// UnmarshalTypeError describes a CBOR value that can't be decoded to a Go type. +type UnmarshalTypeError struct { + CBORType string // type of CBOR value + GoType string // type of Go value it could not be decoded into + StructFieldName string // name of the struct field holding the Go value (optional) + errorMsg string // additional error message (optional) +} + +func (e *UnmarshalTypeError) Error() string { + var s string + if e.StructFieldName != "" { + s = "cbor: cannot unmarshal " + e.CBORType + " into Go struct field " + e.StructFieldName + " of type " + e.GoType + } else { + s = "cbor: cannot unmarshal " + e.CBORType + " into Go value of type " + e.GoType + } + if e.errorMsg != "" { + s += " (" + e.errorMsg + ")" + } + return s +} + +// InvalidMapKeyTypeError describes invalid Go map key type when decoding CBOR map. +// For example, Go doesn't allow slice as map key. 
+type InvalidMapKeyTypeError struct { + GoType string +} + +func (e *InvalidMapKeyTypeError) Error() string { + return "cbor: invalid map key type: " + e.GoType +} + +// DupMapKeyError describes detected duplicate map key in CBOR map. +type DupMapKeyError struct { + Key interface{} + Index int +} + +func (e *DupMapKeyError) Error() string { + return fmt.Sprintf("cbor: found duplicate map key \"%v\" at map element index %d", e.Key, e.Index) +} + +// UnknownFieldError describes detected unknown field in CBOR map when decoding to Go struct. +type UnknownFieldError struct { + Index int +} + +func (e *UnknownFieldError) Error() string { + return fmt.Sprintf("cbor: found unknown field at map element index %d", e.Index) +} + +// UnacceptableDataItemError is returned when unmarshaling a CBOR input that contains a data item +// that is not acceptable to a specific CBOR-based application protocol ("invalid or unexpected" as +// described in RFC 8949 Section 5 Paragraph 3). +type UnacceptableDataItemError struct { + CBORType string + Message string +} + +func (e UnacceptableDataItemError) Error() string { + return fmt.Sprintf("cbor: data item of cbor type %s is not accepted by protocol: %s", e.CBORType, e.Message) +} + +// ByteStringExpectedFormatError is returned when unmarshaling CBOR byte string fails when +// using non-default ByteStringExpectedFormat decoding option that makes decoder expect +// a specified format such as base64, hex, etc. +type ByteStringExpectedFormatError struct { + expectedFormatOption ByteStringExpectedFormatMode + err error +} + +func newByteStringExpectedFormatError(expectedFormatOption ByteStringExpectedFormatMode, err error) *ByteStringExpectedFormatError { + return &ByteStringExpectedFormatError{expectedFormatOption, err} +} + +func (e *ByteStringExpectedFormatError) Error() string { + switch e.expectedFormatOption { + case ByteStringExpectedBase64URL: + return fmt.Sprintf("cbor: failed to decode base64url from byte string: %s", e.err) + + case ByteStringExpectedBase64: + return fmt.Sprintf("cbor: failed to decode base64 from byte string: %s", e.err) + + case ByteStringExpectedBase16: + return fmt.Sprintf("cbor: failed to decode hex from byte string: %s", e.err) + + default: + return fmt.Sprintf("cbor: failed to decode byte string in expected format %d: %s", e.expectedFormatOption, e.err) + } +} + +func (e *ByteStringExpectedFormatError) Unwrap() error { + return e.err +} + +// InadmissibleTagContentTypeError is returned when unmarshaling built-in CBOR tags +// fails because of inadmissible type for tag content. Currently, the built-in +// CBOR tags in this codec are tags 0-3 and 21-23. +// See "Tag validity" in RFC 8949 Section 5.3.2. 
+type InadmissibleTagContentTypeError struct { + s string + tagNum int + expectedTagContentType string + gotTagContentType string +} + +func newInadmissibleTagContentTypeError( + tagNum int, + expectedTagContentType string, + gotTagContentType string, +) *InadmissibleTagContentTypeError { + return &InadmissibleTagContentTypeError{ + tagNum: tagNum, + expectedTagContentType: expectedTagContentType, + gotTagContentType: gotTagContentType, + } +} + +func newInadmissibleTagContentTypeErrorf(s string) *InadmissibleTagContentTypeError { + return &InadmissibleTagContentTypeError{s: "cbor: " + s} //nolint:goconst // ignore "cbor" +} + +func (e *InadmissibleTagContentTypeError) Error() string { + if e.s == "" { + return fmt.Sprintf( + "cbor: tag number %d must be followed by %s, got %s", + e.tagNum, + e.expectedTagContentType, + e.gotTagContentType, + ) + } + return e.s +} + +// DupMapKeyMode specifies how to enforce duplicate map key. Two map keys are considered duplicates if: +// 1. When decoding into a struct, both keys match the same struct field. The keys are also +// considered duplicates if neither matches any field and decoding to interface{} would produce +// equal (==) values for both keys. +// 2. When decoding into a map, both keys are equal (==) when decoded into values of the +// destination map's key type. +type DupMapKeyMode int + +const ( + // DupMapKeyQuiet doesn't enforce duplicate map key. Decoder quietly (no error) + // uses faster of "keep first" or "keep last" depending on Go data type and other factors. + DupMapKeyQuiet DupMapKeyMode = iota + + // DupMapKeyEnforcedAPF enforces detection and rejection of duplicate map keys. + // APF means "Allow Partial Fill" and the destination map or struct can be partially filled. + // If a duplicate map key is detected, DupMapKeyError is returned without further decoding + // of the map. It's the caller's responsibility to respond to DupMapKeyError by + // discarding the partially filled result if their protocol requires it. + // WARNING: using DupMapKeyEnforcedAPF will decrease performance and increase memory use. + DupMapKeyEnforcedAPF + + maxDupMapKeyMode +) + +func (dmkm DupMapKeyMode) valid() bool { + return dmkm >= 0 && dmkm < maxDupMapKeyMode +} + +// IndefLengthMode specifies whether to allow indefinite length items. +type IndefLengthMode int + +const ( + // IndefLengthAllowed allows indefinite length items. + IndefLengthAllowed IndefLengthMode = iota + + // IndefLengthForbidden disallows indefinite length items. + IndefLengthForbidden + + maxIndefLengthMode +) + +func (m IndefLengthMode) valid() bool { + return m >= 0 && m < maxIndefLengthMode +} + +// TagsMode specifies whether to allow CBOR tags. +type TagsMode int + +const ( + // TagsAllowed allows CBOR tags. + TagsAllowed TagsMode = iota + + // TagsForbidden disallows CBOR tags. + TagsForbidden + + maxTagsMode +) + +func (tm TagsMode) valid() bool { + return tm >= 0 && tm < maxTagsMode +} + +// IntDecMode specifies which Go type (int64, uint64, or big.Int) should +// be used when decoding CBOR integers (major type 0 and 1) to Go interface{}. +type IntDecMode int + +const ( + // IntDecConvertNone affects how CBOR integers (major type 0 and 1) decode to Go interface{}. 
+	// It decodes CBOR unsigned integer (major type 0) to:
+	//   - uint64
+	// It decodes CBOR negative integer (major type 1) to:
+	//   - int64 if value fits
+	//   - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64
+	IntDecConvertNone IntDecMode = iota
+
+	// IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+	// It decodes CBOR integers (major type 0 and 1) to:
+	//   - int64 if value fits
+	//   - big.Int or *big.Int (see BigIntDecMode) if value < math.MinInt64
+	//   - returns UnmarshalTypeError if value > math.MaxInt64
+	// Deprecated: IntDecConvertSigned should not be used.
+	// Please use other options, such as IntDecConvertSignedOrFail, IntDecConvertSignedOrBigInt, IntDecConvertNone.
+	IntDecConvertSigned
+
+	// IntDecConvertSignedOrFail affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+	// It decodes CBOR integers (major type 0 and 1) to:
+	//   - int64 if value fits
+	//   - returns UnmarshalTypeError if value doesn't fit into int64
+	IntDecConvertSignedOrFail
+
+	// IntDecConvertSignedOrBigInt affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+	// It makes CBOR integers (major type 0 and 1) decode to:
+	//   - int64 if value fits
+	//   - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64
+	IntDecConvertSignedOrBigInt
+
+	maxIntDec
+)
+
+func (idm IntDecMode) valid() bool {
+	return idm >= 0 && idm < maxIntDec
+}
+
+// MapKeyByteStringMode specifies how to decode CBOR byte string (major type 2)
+// as Go map key when decoding CBOR map key into an empty Go interface value.
+// Specifically, this option applies when decoding CBOR map into
+// - Go empty interface, or
+// - Go map with empty interface as key type.
+// The CBOR map key types handled by this option are
+// - byte string
+// - tagged byte string
+// - nested tagged byte string
+type MapKeyByteStringMode int
+
+const (
+	// MapKeyByteStringAllowed allows CBOR byte string to be decoded as Go map key.
+	// Since Go doesn't allow []byte as map key, CBOR byte string is decoded to
+	// ByteString which has underlying string type.
+	// This is the default setting.
+	MapKeyByteStringAllowed MapKeyByteStringMode = iota
+
+	// MapKeyByteStringForbidden forbids CBOR byte string being decoded as Go map key.
+	// Attempting to decode CBOR byte string as map key into empty interface value
+	// returns a decoding error.
+	MapKeyByteStringForbidden
+
+	maxMapKeyByteStringMode
+)
+
+func (mkbsm MapKeyByteStringMode) valid() bool {
+	return mkbsm >= 0 && mkbsm < maxMapKeyByteStringMode
+}
+
+// ExtraDecErrorCond specifies extra conditions that should be treated as errors.
+type ExtraDecErrorCond uint
+
+// ExtraDecErrorNone indicates no extra error condition.
+const ExtraDecErrorNone ExtraDecErrorCond = 0
+
+const (
+	// ExtraDecErrorUnknownField indicates error condition when destination
+	// Go struct doesn't have a field matching a CBOR map key.
+	ExtraDecErrorUnknownField ExtraDecErrorCond = 1 << iota
+
+	maxExtraDecError
+)
+
+func (ec ExtraDecErrorCond) valid() bool {
+	return ec < maxExtraDecError
+}
+
+// UTF8Mode specifies whether the decoder should
+// decode CBOR Text containing an invalid UTF-8 string.
+type UTF8Mode int
+
+const (
+	// UTF8RejectInvalid rejects CBOR Text containing
+	// invalid UTF-8 string.
+	UTF8RejectInvalid UTF8Mode = iota
+
+	// UTF8DecodeInvalid allows decoding CBOR Text containing
+	// invalid UTF-8 string.
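+	//
+	// Editor's illustration (hedged): Go strings may hold arbitrary bytes, so
+	// the invalid sequence is carried into the destination unchanged:
+	//
+	//	dm, _ := cbor.DecOptions{UTF8: cbor.UTF8DecodeInvalid}.DecMode()
+	//	var s string
+	//	err := dm.Unmarshal([]byte{0x61, 0xfe}, &s) // s == "\xfe", err == nil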
+ UTF8DecodeInvalid + + maxUTF8Mode +) + +func (um UTF8Mode) valid() bool { + return um >= 0 && um < maxUTF8Mode +} + +// FieldNameMatchingMode specifies how string keys in CBOR maps are matched to Go struct field names. +type FieldNameMatchingMode int + +const ( + // FieldNameMatchingPreferCaseSensitive prefers to decode map items into struct fields whose names (or tag + // names) exactly match the item's key. If there is no such field, a map item will be decoded into a field whose + // name is a case-insensitive match for the item's key. + FieldNameMatchingPreferCaseSensitive FieldNameMatchingMode = iota + + // FieldNameMatchingCaseSensitive decodes map items only into a struct field whose name (or tag name) is an + // exact match for the item's key. + FieldNameMatchingCaseSensitive + + maxFieldNameMatchingMode +) + +func (fnmm FieldNameMatchingMode) valid() bool { + return fnmm >= 0 && fnmm < maxFieldNameMatchingMode +} + +// BigIntDecMode specifies how to decode CBOR bignum to Go interface{}. +type BigIntDecMode int + +const ( + // BigIntDecodeValue makes CBOR bignum decode to big.Int (instead of *big.Int) + // when unmarshalling into a Go interface{}. + BigIntDecodeValue BigIntDecMode = iota + + // BigIntDecodePointer makes CBOR bignum decode to *big.Int when + // unmarshalling into a Go interface{}. + BigIntDecodePointer + + maxBigIntDecMode +) + +func (bidm BigIntDecMode) valid() bool { + return bidm >= 0 && bidm < maxBigIntDecMode +} + +// ByteStringToStringMode specifies the behavior when decoding a CBOR byte string into a Go string. +type ByteStringToStringMode int + +const ( + // ByteStringToStringForbidden generates an error on an attempt to decode a CBOR byte string into a Go string. + ByteStringToStringForbidden ByteStringToStringMode = iota + + // ByteStringToStringAllowed permits decoding a CBOR byte string into a Go string. + ByteStringToStringAllowed + + // ByteStringToStringAllowedWithExpectedLaterEncoding permits decoding a CBOR byte string + // into a Go string. Also, if the byte string is enclosed (directly or indirectly) by one of + // the "expected later encoding" tags (numbers 21 through 23), the destination string will + // be populated by applying the designated text encoding to the contents of the input byte + // string. + ByteStringToStringAllowedWithExpectedLaterEncoding + + maxByteStringToStringMode +) + +func (bstsm ByteStringToStringMode) valid() bool { + return bstsm >= 0 && bstsm < maxByteStringToStringMode +} + +// FieldNameByteStringMode specifies the behavior when decoding a CBOR byte string map key as a Go struct field name. +type FieldNameByteStringMode int + +const ( + // FieldNameByteStringForbidden generates an error on an attempt to decode a CBOR byte string map key as a Go struct field name. + FieldNameByteStringForbidden FieldNameByteStringMode = iota + + // FieldNameByteStringAllowed permits CBOR byte string map keys to be recognized as Go struct field names. + FieldNameByteStringAllowed + + maxFieldNameByteStringMode +) + +func (fnbsm FieldNameByteStringMode) valid() bool { + return fnbsm >= 0 && fnbsm < maxFieldNameByteStringMode +} + +// UnrecognizedTagToAnyMode specifies how to decode unrecognized CBOR tag into an empty interface (any). +// Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet. +type UnrecognizedTagToAnyMode int + +const ( + // UnrecognizedTagNumAndContentToAny decodes CBOR tag number and tag content to cbor.Tag + // when decoding unrecognized CBOR tag into an empty interface. 
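+	//
+	// Editor's illustration (hedged): the data item 42(h'01'), bytes
+	// 0xd8, 0x2a, 0x41, 0x01, decodes into an empty interface as
+	// Tag{Number: 42, Content: []byte{0x01}} under this mode;
+	// UnrecognizedTagContentToAny would instead yield just []byte{0x01}.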
+	UnrecognizedTagNumAndContentToAny UnrecognizedTagToAnyMode = iota
+
+	// UnrecognizedTagContentToAny decodes only CBOR tag content (into its default type)
+	// when decoding unrecognized CBOR tag into an empty interface.
+	UnrecognizedTagContentToAny
+
+	maxUnrecognizedTagToAny
+)
+
+func (uttam UnrecognizedTagToAnyMode) valid() bool {
+	return uttam >= 0 && uttam < maxUnrecognizedTagToAny
+}
+
+// TimeTagToAnyMode specifies how to decode CBOR tag 0 and 1 into an empty interface (any).
+// Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format.
+type TimeTagToAnyMode int
+
+const (
+	// TimeTagToTime decodes CBOR tag 0 and 1 into a time.Time value
+	// when decoding tag 0 or 1 into an empty interface.
+	TimeTagToTime TimeTagToAnyMode = iota
+
+	// TimeTagToRFC3339 decodes CBOR tag 0 and 1 into a time string in RFC3339 format
+	// when decoding tag 0 or 1 into an empty interface.
+	TimeTagToRFC3339
+
+	// TimeTagToRFC3339Nano decodes CBOR tag 0 and 1 into a time string in RFC3339Nano format
+	// when decoding tag 0 or 1 into an empty interface.
+	TimeTagToRFC3339Nano
+
+	maxTimeTagToAnyMode
+)
+
+func (tttam TimeTagToAnyMode) valid() bool {
+	return tttam >= 0 && tttam < maxTimeTagToAnyMode
+}
+
+// SimpleValueRegistry is a registry of unmarshaling behaviors for each possible CBOR simple value
+// number (0...23 and 32...255).
+type SimpleValueRegistry struct {
+	rejected [256]bool
+}
+
+// WithRejectedSimpleValue registers the given simple value as rejected. If the simple value is
+// encountered in a CBOR input during unmarshaling, an UnacceptableDataItemError is returned.
+func WithRejectedSimpleValue(sv SimpleValue) func(*SimpleValueRegistry) error {
+	return func(r *SimpleValueRegistry) error {
+		if sv >= 24 && sv <= 31 {
+			return fmt.Errorf("cbor: cannot set analog for reserved simple value %d", sv)
+		}
+		r.rejected[sv] = true
+		return nil
+	}
+}
+
+// NewSimpleValueRegistryFromDefaults creates a new SimpleValueRegistry. The registry state is
+// initialized by executing the provided functions in order against a registry that is
+// pre-populated with the defaults for all well-formed simple value numbers.
+func NewSimpleValueRegistryFromDefaults(fns ...func(*SimpleValueRegistry) error) (*SimpleValueRegistry, error) {
+	var r SimpleValueRegistry
+	for _, fn := range fns {
+		if err := fn(&r); err != nil {
+			return nil, err
+		}
+	}
+	return &r, nil
+}
+
+// NaNMode specifies how to decode floating-point values (major type 7, additional information 25
+// through 27) representing NaN (not-a-number).
+type NaNMode int
+
+const (
+	// NaNDecodeAllowed will decode NaN values to Go float32 or float64.
+	NaNDecodeAllowed NaNMode = iota
+
+	// NaNDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode a NaN value.
+	NaNDecodeForbidden
+
+	maxNaNDecode
+)
+
+func (ndm NaNMode) valid() bool {
+	return ndm >= 0 && ndm < maxNaNDecode
+}
+
+// InfMode specifies how to decode floating-point values (major type 7, additional information 25
+// through 27) representing positive or negative infinity.
+type InfMode int
+
+const (
+	// InfDecodeAllowed will decode infinite values to Go float32 or float64.
+	InfDecodeAllowed InfMode = iota
+
+	// InfDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode an
+	// infinite value.
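+	//
+	// Editor's illustration (hedged): under InfDecodeForbidden the input
+	// 0xf9, 0x7c, 0x00 (half-precision +Infinity) is rejected instead of
+	// decoding to math.Inf(1).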
+ InfDecodeForbidden + + maxInfDecode +) + +func (idm InfMode) valid() bool { + return idm >= 0 && idm < maxInfDecode +} + +// ByteStringToTimeMode specifies the behavior when decoding a CBOR byte string into a Go time.Time. +type ByteStringToTimeMode int + +const ( + // ByteStringToTimeForbidden generates an error on an attempt to decode a CBOR byte string into a Go time.Time. + ByteStringToTimeForbidden ByteStringToTimeMode = iota + + // ByteStringToTimeAllowed permits decoding a CBOR byte string into a Go time.Time. + ByteStringToTimeAllowed + + maxByteStringToTimeMode +) + +func (bttm ByteStringToTimeMode) valid() bool { + return bttm >= 0 && bttm < maxByteStringToTimeMode +} + +// ByteStringExpectedFormatMode specifies how to decode CBOR byte string into Go byte slice +// when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if +// the CBOR byte string does not contain the expected format (e.g. base64) specified. +// For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters" +// in RFC 8949 Section 3.4.5.2. +type ByteStringExpectedFormatMode int + +const ( + // ByteStringExpectedFormatNone copies the unmodified CBOR byte string into Go byte slice + // if the byte string is not tagged by CBOR tag 21-23. + ByteStringExpectedFormatNone ByteStringExpectedFormatMode = iota + + // ByteStringExpectedBase64URL expects CBOR byte strings to contain base64url-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base64url-encoded bytes into Go slice. + ByteStringExpectedBase64URL + + // ByteStringExpectedBase64 expects CBOR byte strings to contain base64-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base64-encoded bytes into Go slice. + ByteStringExpectedBase64 + + // ByteStringExpectedBase16 expects CBOR byte strings to contain base16-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base16-encoded bytes into Go slice. + ByteStringExpectedBase16 + + maxByteStringExpectedFormatMode +) + +func (bsefm ByteStringExpectedFormatMode) valid() bool { + return bsefm >= 0 && bsefm < maxByteStringExpectedFormatMode +} + +// BignumTagMode specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can be +// decoded. +type BignumTagMode int + +const ( + // BignumTagAllowed allows bignum tags to be decoded. + BignumTagAllowed BignumTagMode = iota + + // BignumTagForbidden produces an UnacceptableDataItemError during Unmarshal if a bignum tag + // is encountered in the input. + BignumTagForbidden + + maxBignumTag +) + +func (btm BignumTagMode) valid() bool { + return btm >= 0 && btm < maxBignumTag +} + +// BinaryUnmarshalerMode specifies how to decode into types that implement +// encoding.BinaryUnmarshaler. +type BinaryUnmarshalerMode int + +const ( + // BinaryUnmarshalerByteString will invoke UnmarshalBinary on the contents of a CBOR byte + // string when decoding into a value that implements BinaryUnmarshaler. + BinaryUnmarshalerByteString BinaryUnmarshalerMode = iota + + // BinaryUnmarshalerNone does not recognize BinaryUnmarshaler implementations during decode. + BinaryUnmarshalerNone + + maxBinaryUnmarshalerMode +) + +func (bum BinaryUnmarshalerMode) valid() bool { + return bum >= 0 && bum < maxBinaryUnmarshalerMode +} + +// DecOptions specifies decoding options. +type DecOptions struct { + // DupMapKey specifies whether to enforce duplicate map key. 
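+	//
+	// Editor's sketch of typical DecOptions usage (hedged; data and v are
+	// placeholders):
+	//
+	//	opts := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}
+	//	dm, err := opts.DecMode() // dm is immutable and safe for concurrent use
+	//	if err == nil {
+	//		err = dm.Unmarshal(data, &v) // may return *DupMapKeyError
+	//	}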
+	DupMapKey DupMapKeyMode
+
+	// TimeTag specifies whether or not untagged data items, or tags other
+	// than tag 0 and tag 1, can be decoded to time.Time. If tag 0 or tag 1
+	// appears in an input, the type of its content is always validated as
+	// specified in RFC 8949. That behavior is not controlled by this
+	// option. The behaviors of the supported modes are:
+	//
+	// DecTagIgnored (default): Untagged text strings and text strings
+	// enclosed in tags other than 0 and 1 are decoded as though enclosed
+	// in tag 0. Untagged unsigned integers, negative integers, and
+	// floating-point numbers (or those enclosed in tags other than 0 and
+	// 1) are decoded as though enclosed in tag 1. Decoding a tag other
+	// than 0 or 1 enclosing simple values null or undefined into a
+	// time.Time does not modify the destination value.
+	//
+	// DecTagOptional: Untagged text strings are decoded as though
+	// enclosed in tag 0. Untagged unsigned integers, negative integers,
+	// and floating-point numbers are decoded as though enclosed in tag
+	// 1. Tags other than 0 and 1 will produce an error on attempts to
+	// decode them into a time.Time.
+	//
+	// DecTagRequired: Only tags 0 and 1 can be decoded to time.Time. Any
+	// other input will produce an error.
+	TimeTag DecTagMode
+
+	// MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags.
+	// Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can
+	// require larger amounts of stack to deserialize. Don't increase this higher than you require.
+	MaxNestedLevels int
+
+	// MaxArrayElements specifies the max number of elements for CBOR arrays.
+	// Default is 128*1024=131072 and it can be set to [16, 2147483647].
+	MaxArrayElements int
+
+	// MaxMapPairs specifies the max number of key-value pairs for CBOR maps.
+	// Default is 128*1024=131072 and it can be set to [16, 2147483647].
+	MaxMapPairs int
+
+	// IndefLength specifies whether to allow indefinite length CBOR items.
+	IndefLength IndefLengthMode
+
+	// TagsMd specifies whether to allow CBOR tags (major type 6).
+	TagsMd TagsMode
+
+	// IntDec specifies which Go integer type (int64 or uint64) to use
+	// when decoding CBOR int (major type 0 and 1) to Go interface{}.
+	IntDec IntDecMode
+
+	// MapKeyByteString specifies how to decode CBOR byte string as map key
+	// when decoding CBOR map with byte string key into an empty interface value.
+	// By default (MapKeyByteStringAllowed), a CBOR byte string map key is decoded
+	// to the ByteString type, because Go doesn't allow []byte as map key.
+	MapKeyByteString MapKeyByteStringMode
+
+	// ExtraReturnErrors specifies extra conditions that should be treated as errors.
+	ExtraReturnErrors ExtraDecErrorCond
+
+	// DefaultMapType specifies Go map type to create and decode to
+	// when unmarshalling CBOR into an empty interface value.
+	// By default, unmarshal uses map[interface{}]interface{}.
+	DefaultMapType reflect.Type
+
+	// UTF8 specifies if decoder should decode CBOR Text containing invalid UTF-8.
+	// By default, unmarshal rejects CBOR text containing invalid UTF-8.
+	UTF8 UTF8Mode
+
+	// FieldNameMatching specifies how string keys in CBOR maps are matched to Go struct field names.
+	FieldNameMatching FieldNameMatchingMode
+
+	// BigIntDec specifies how to decode CBOR bignum to Go interface{}.
+	BigIntDec BigIntDecMode
+
+	// DefaultByteStringType is the Go type that should be produced when decoding a CBOR byte
+	// string into an empty interface value.
Types to which a []byte is convertible are valid + // for this option, except for array and pointer-to-array types. If nil, the default is + // []byte. + DefaultByteStringType reflect.Type + + // ByteStringToString specifies the behavior when decoding a CBOR byte string into a Go string. + ByteStringToString ByteStringToStringMode + + // FieldNameByteString specifies the behavior when decoding a CBOR byte string map key as a + // Go struct field name. + FieldNameByteString FieldNameByteStringMode + + // UnrecognizedTagToAny specifies how to decode unrecognized CBOR tag into an empty interface. + // Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet. + UnrecognizedTagToAny UnrecognizedTagToAnyMode + + // TimeTagToAny specifies how to decode CBOR tag 0 and 1 into an empty interface (any). + // Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format. + TimeTagToAny TimeTagToAnyMode + + // SimpleValues is an immutable mapping from each CBOR simple value to a corresponding + // unmarshal behavior. If nil, the simple values false, true, null, and undefined are mapped + // to the Go analog values false, true, nil, and nil, respectively, and all other simple + // values N (except the reserved simple values 24 through 31) are mapped to + // cbor.SimpleValue(N). In other words, all well-formed simple values can be decoded. + // + // Users may provide a custom SimpleValueRegistry constructed via + // NewSimpleValueRegistryFromDefaults. + SimpleValues *SimpleValueRegistry + + // NaN specifies how to decode floating-point values (major type 7, additional information + // 25 through 27) representing NaN (not-a-number). + NaN NaNMode + + // Inf specifies how to decode floating-point values (major type 7, additional information + // 25 through 27) representing positive or negative infinity. + Inf InfMode + + // ByteStringToTime specifies how to decode CBOR byte string into Go time.Time. + ByteStringToTime ByteStringToTimeMode + + // ByteStringExpectedFormat specifies how to decode CBOR byte string into Go byte slice + // when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if + // the CBOR byte string does not contain the expected format (e.g. base64) specified. + // For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters" + // in RFC 8949 Section 3.4.5.2. + ByteStringExpectedFormat ByteStringExpectedFormatMode + + // BignumTag specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can + // be decoded. Unlike BigIntDec, this option applies to all bignum tags encountered in a + // CBOR input, independent of the type of the destination value of a particular Unmarshal + // operation. + BignumTag BignumTagMode + + // BinaryUnmarshaler specifies how to decode into types that implement + // encoding.BinaryUnmarshaler. + BinaryUnmarshaler BinaryUnmarshalerMode +} + +// DecMode returns DecMode with immutable options and no tags (safe for concurrency). +func (opts DecOptions) DecMode() (DecMode, error) { //nolint:gocritic // ignore hugeParam + return opts.decMode() +} + +// validForTags checks that the provided tag set is compatible with these options and returns a +// non-nil error if and only if the provided tag set is incompatible. 
+func (opts DecOptions) validForTags(tags TagSet) error { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return errors.New("cbor: cannot create DecMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return errors.New("cbor: cannot create DecMode with nil value as TagSet") + } + if opts.ByteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || + opts.ByteStringExpectedFormat != ByteStringExpectedFormatNone { + for _, tagNum := range []uint64{ + tagNumExpectedLaterEncodingBase64URL, + tagNumExpectedLaterEncodingBase64, + tagNumExpectedLaterEncodingBase16, + } { + if rt := tags.getTypeFromTagNum([]uint64{tagNum}); rt != nil { + return fmt.Errorf("cbor: DecMode with non-default StringExpectedEncoding or ByteSliceExpectedEncoding treats tag %d as built-in and conflicts with the provided TagSet's registration of %v", tagNum, rt) + } + } + + } + return nil +} + +// DecModeWithTags returns DecMode with options and tags that are both immutable (safe for concurrency). +func (opts DecOptions) DecModeWithTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam + if err := opts.validForTags(tags); err != nil { + return nil, err + } + dm, err := opts.decMode() + if err != nil { + return nil, err + } + + // Copy tags + ts := tagSet(make(map[reflect.Type]*tagItem)) + syncTags := tags.(*syncTagSet) + syncTags.RLock() + for contentType, tag := range syncTags.t { + if tag.opts.DecTag != DecTagIgnored { + ts[contentType] = tag + } + } + syncTags.RUnlock() + + if len(ts) > 0 { + dm.tags = ts + } + + return dm, nil +} + +// DecModeWithSharedTags returns DecMode with immutable options and mutable shared tags (safe for concurrency). +func (opts DecOptions) DecModeWithSharedTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam + if err := opts.validForTags(tags); err != nil { + return nil, err + } + dm, err := opts.decMode() + if err != nil { + return nil, err + } + dm.tags = tags + return dm, nil +} + +const ( + defaultMaxArrayElements = 131072 + minMaxArrayElements = 16 + maxMaxArrayElements = 2147483647 + + defaultMaxMapPairs = 131072 + minMaxMapPairs = 16 + maxMaxMapPairs = 2147483647 + + defaultMaxNestedLevels = 32 + minMaxNestedLevels = 4 + maxMaxNestedLevels = 65535 +) + +var defaultSimpleValues = func() *SimpleValueRegistry { + registry, err := NewSimpleValueRegistryFromDefaults() + if err != nil { + panic(err) + } + return registry +}() + +//nolint:gocyclo // Each option comes with some manageable boilerplate +func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore hugeParam + if !opts.DupMapKey.valid() { + return nil, errors.New("cbor: invalid DupMapKey " + strconv.Itoa(int(opts.DupMapKey))) + } + + if !opts.TimeTag.valid() { + return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag))) + } + + if !opts.IndefLength.valid() { + return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength))) + } + + if !opts.TagsMd.valid() { + return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd))) + } + + if !opts.IntDec.valid() { + return nil, errors.New("cbor: invalid IntDec " + strconv.Itoa(int(opts.IntDec))) + } + + if !opts.MapKeyByteString.valid() { + return nil, errors.New("cbor: invalid MapKeyByteString " + strconv.Itoa(int(opts.MapKeyByteString))) + } + + if opts.MaxNestedLevels == 0 { + opts.MaxNestedLevels = defaultMaxNestedLevels + } else if opts.MaxNestedLevels < minMaxNestedLevels || opts.MaxNestedLevels 
> maxMaxNestedLevels { + return nil, errors.New("cbor: invalid MaxNestedLevels " + strconv.Itoa(opts.MaxNestedLevels) + + " (range is [" + strconv.Itoa(minMaxNestedLevels) + ", " + strconv.Itoa(maxMaxNestedLevels) + "])") + } + + if opts.MaxArrayElements == 0 { + opts.MaxArrayElements = defaultMaxArrayElements + } else if opts.MaxArrayElements < minMaxArrayElements || opts.MaxArrayElements > maxMaxArrayElements { + return nil, errors.New("cbor: invalid MaxArrayElements " + strconv.Itoa(opts.MaxArrayElements) + + " (range is [" + strconv.Itoa(minMaxArrayElements) + ", " + strconv.Itoa(maxMaxArrayElements) + "])") + } + + if opts.MaxMapPairs == 0 { + opts.MaxMapPairs = defaultMaxMapPairs + } else if opts.MaxMapPairs < minMaxMapPairs || opts.MaxMapPairs > maxMaxMapPairs { + return nil, errors.New("cbor: invalid MaxMapPairs " + strconv.Itoa(opts.MaxMapPairs) + + " (range is [" + strconv.Itoa(minMaxMapPairs) + ", " + strconv.Itoa(maxMaxMapPairs) + "])") + } + + if !opts.ExtraReturnErrors.valid() { + return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors))) + } + + if opts.DefaultMapType != nil && opts.DefaultMapType.Kind() != reflect.Map { + return nil, fmt.Errorf("cbor: invalid DefaultMapType %s", opts.DefaultMapType) + } + + if !opts.UTF8.valid() { + return nil, errors.New("cbor: invalid UTF8 " + strconv.Itoa(int(opts.UTF8))) + } + + if !opts.FieldNameMatching.valid() { + return nil, errors.New("cbor: invalid FieldNameMatching " + strconv.Itoa(int(opts.FieldNameMatching))) + } + + if !opts.BigIntDec.valid() { + return nil, errors.New("cbor: invalid BigIntDec " + strconv.Itoa(int(opts.BigIntDec))) + } + + if opts.DefaultByteStringType != nil && + opts.DefaultByteStringType.Kind() != reflect.String && + (opts.DefaultByteStringType.Kind() != reflect.Slice || opts.DefaultByteStringType.Elem().Kind() != reflect.Uint8) { + return nil, fmt.Errorf("cbor: invalid DefaultByteStringType: %s is not of kind string or []uint8", opts.DefaultByteStringType) + } + + if !opts.ByteStringToString.valid() { + return nil, errors.New("cbor: invalid ByteStringToString " + strconv.Itoa(int(opts.ByteStringToString))) + } + + if !opts.FieldNameByteString.valid() { + return nil, errors.New("cbor: invalid FieldNameByteString " + strconv.Itoa(int(opts.FieldNameByteString))) + } + + if !opts.UnrecognizedTagToAny.valid() { + return nil, errors.New("cbor: invalid UnrecognizedTagToAnyMode " + strconv.Itoa(int(opts.UnrecognizedTagToAny))) + } + simpleValues := opts.SimpleValues + if simpleValues == nil { + simpleValues = defaultSimpleValues + } + + if !opts.TimeTagToAny.valid() { + return nil, errors.New("cbor: invalid TimeTagToAny " + strconv.Itoa(int(opts.TimeTagToAny))) + } + + if !opts.NaN.valid() { + return nil, errors.New("cbor: invalid NaNDec " + strconv.Itoa(int(opts.NaN))) + } + + if !opts.Inf.valid() { + return nil, errors.New("cbor: invalid InfDec " + strconv.Itoa(int(opts.Inf))) + } + + if !opts.ByteStringToTime.valid() { + return nil, errors.New("cbor: invalid ByteStringToTime " + strconv.Itoa(int(opts.ByteStringToTime))) + } + + if !opts.ByteStringExpectedFormat.valid() { + return nil, errors.New("cbor: invalid ByteStringExpectedFormat " + strconv.Itoa(int(opts.ByteStringExpectedFormat))) + } + + if !opts.BignumTag.valid() { + return nil, errors.New("cbor: invalid BignumTag " + strconv.Itoa(int(opts.BignumTag))) + } + + if !opts.BinaryUnmarshaler.valid() { + return nil, errors.New("cbor: invalid BinaryUnmarshaler " + strconv.Itoa(int(opts.BinaryUnmarshaler))) + } + + 
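+	// All options are valid at this point; assemble the immutable decMode value
+	// backing the returned DecMode (editor's descriptive note).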
dm := decMode{ + dupMapKey: opts.DupMapKey, + timeTag: opts.TimeTag, + maxNestedLevels: opts.MaxNestedLevels, + maxArrayElements: opts.MaxArrayElements, + maxMapPairs: opts.MaxMapPairs, + indefLength: opts.IndefLength, + tagsMd: opts.TagsMd, + intDec: opts.IntDec, + mapKeyByteString: opts.MapKeyByteString, + extraReturnErrors: opts.ExtraReturnErrors, + defaultMapType: opts.DefaultMapType, + utf8: opts.UTF8, + fieldNameMatching: opts.FieldNameMatching, + bigIntDec: opts.BigIntDec, + defaultByteStringType: opts.DefaultByteStringType, + byteStringToString: opts.ByteStringToString, + fieldNameByteString: opts.FieldNameByteString, + unrecognizedTagToAny: opts.UnrecognizedTagToAny, + timeTagToAny: opts.TimeTagToAny, + simpleValues: simpleValues, + nanDec: opts.NaN, + infDec: opts.Inf, + byteStringToTime: opts.ByteStringToTime, + byteStringExpectedFormat: opts.ByteStringExpectedFormat, + bignumTag: opts.BignumTag, + binaryUnmarshaler: opts.BinaryUnmarshaler, + } + + return &dm, nil +} + +// DecMode is the main interface for CBOR decoding. +type DecMode interface { + // Unmarshal parses the CBOR-encoded data into the value pointed to by v + // using the decoding mode. If v is nil, not a pointer, or a nil pointer, + // Unmarshal returns an error. + // + // See the documentation for Unmarshal for details. + Unmarshal(data []byte, v interface{}) error + + // UnmarshalFirst parses the first CBOR data item into the value pointed to by v + // using the decoding mode. Any remaining bytes are returned in rest. + // + // If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. + // + // See the documentation for Unmarshal for details. + UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) + + // Valid checks whether data is a well-formed encoded CBOR data item and + // that it complies with configurable restrictions such as MaxNestedLevels, + // MaxArrayElements, MaxMapPairs, etc. + // + // If there are any remaining bytes after the CBOR data item, + // an ExtraneousDataError is returned. + // + // WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) + // and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". + // + // Deprecated: Valid is kept for compatibility and should not be used. + // Use Wellformed instead because it has a more appropriate name. + Valid(data []byte) error + + // Wellformed checks whether data is a well-formed encoded CBOR data item and + // that it complies with configurable restrictions such as MaxNestedLevels, + // MaxArrayElements, MaxMapPairs, etc. + // + // If there are any remaining bytes after the CBOR data item, + // an ExtraneousDataError is returned. + Wellformed(data []byte) error + + // NewDecoder returns a new decoder that reads from r using dm DecMode. + NewDecoder(r io.Reader) *Decoder + + // DecOptions returns user specified options used to create this DecMode. 
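+	//
+	// Editor's sketch (hedged): this allows a mode to be inspected, or a
+	// stricter mode to be derived from an existing one:
+	//
+	//	opts := dm.DecOptions()
+	//	opts.MaxNestedLevels = 16
+	//	stricter, err := opts.DecMode()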
+ DecOptions() DecOptions +} + +type decMode struct { + tags tagProvider + dupMapKey DupMapKeyMode + timeTag DecTagMode + maxNestedLevels int + maxArrayElements int + maxMapPairs int + indefLength IndefLengthMode + tagsMd TagsMode + intDec IntDecMode + mapKeyByteString MapKeyByteStringMode + extraReturnErrors ExtraDecErrorCond + defaultMapType reflect.Type + utf8 UTF8Mode + fieldNameMatching FieldNameMatchingMode + bigIntDec BigIntDecMode + defaultByteStringType reflect.Type + byteStringToString ByteStringToStringMode + fieldNameByteString FieldNameByteStringMode + unrecognizedTagToAny UnrecognizedTagToAnyMode + timeTagToAny TimeTagToAnyMode + simpleValues *SimpleValueRegistry + nanDec NaNMode + infDec InfMode + byteStringToTime ByteStringToTimeMode + byteStringExpectedFormat ByteStringExpectedFormatMode + bignumTag BignumTagMode + binaryUnmarshaler BinaryUnmarshalerMode +} + +var defaultDecMode, _ = DecOptions{}.decMode() + +// DecOptions returns user specified options used to create this DecMode. +func (dm *decMode) DecOptions() DecOptions { + simpleValues := dm.simpleValues + if simpleValues == defaultSimpleValues { + // Users can't explicitly set this to defaultSimpleValues. It must have been nil in + // the original DecOptions. + simpleValues = nil + } + + return DecOptions{ + DupMapKey: dm.dupMapKey, + TimeTag: dm.timeTag, + MaxNestedLevels: dm.maxNestedLevels, + MaxArrayElements: dm.maxArrayElements, + MaxMapPairs: dm.maxMapPairs, + IndefLength: dm.indefLength, + TagsMd: dm.tagsMd, + IntDec: dm.intDec, + MapKeyByteString: dm.mapKeyByteString, + ExtraReturnErrors: dm.extraReturnErrors, + DefaultMapType: dm.defaultMapType, + UTF8: dm.utf8, + FieldNameMatching: dm.fieldNameMatching, + BigIntDec: dm.bigIntDec, + DefaultByteStringType: dm.defaultByteStringType, + ByteStringToString: dm.byteStringToString, + FieldNameByteString: dm.fieldNameByteString, + UnrecognizedTagToAny: dm.unrecognizedTagToAny, + TimeTagToAny: dm.timeTagToAny, + SimpleValues: simpleValues, + NaN: dm.nanDec, + Inf: dm.infDec, + ByteStringToTime: dm.byteStringToTime, + ByteStringExpectedFormat: dm.byteStringExpectedFormat, + BignumTag: dm.bignumTag, + BinaryUnmarshaler: dm.binaryUnmarshaler, + } +} + +// Unmarshal parses the CBOR-encoded data into the value pointed to by v +// using dm decoding mode. If v is nil, not a pointer, or a nil pointer, +// Unmarshal returns an error. +// +// See the documentation for Unmarshal for details. +func (dm *decMode) Unmarshal(data []byte, v interface{}) error { + d := decoder{data: data, dm: dm} + + // Check well-formedness. + off := d.off // Save offset before data validation + err := d.wellformed(false, false) // don't allow any extra data after valid data item. + d.off = off // Restore offset + if err != nil { + return err + } + + return d.value(v) +} + +// UnmarshalFirst parses the first CBOR data item into the value pointed to by v +// using dm decoding mode. Any remaining bytes are returned in rest. +// +// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. +// +// See the documentation for Unmarshal for details. +func (dm *decMode) UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) { + d := decoder{data: data, dm: dm} + + // check well-formedness. + off := d.off // Save offset before data validation + err = d.wellformed(true, false) // allow extra data after well-formed data item + d.off = off // Restore offset + + // If it is well-formed, parse the value. 
The check is kept separate from
+	// parsing to improve test coverage.
+	if err == nil {
+		err = d.value(v)
+	}
+
+	// If either wellformed or value returned an error, do not return rest bytes
+	if err != nil {
+		return nil, err
+	}
+
+	// Return the rest of the data slice (which might be len 0)
+	return d.data[d.off:], nil
+}
+
+// Valid checks whether data is a well-formed encoded CBOR data item and
+// that it complies with configurable restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+//
+// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity)
+// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed".
+//
+// Deprecated: Valid is kept for compatibility and should not be used.
+// Use Wellformed instead because it has a more appropriate name.
+func (dm *decMode) Valid(data []byte) error {
+	return dm.Wellformed(data)
+}
+
+// Wellformed checks whether data is a well-formed encoded CBOR data item and
+// that it complies with configurable restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+func (dm *decMode) Wellformed(data []byte) error {
+	d := decoder{data: data, dm: dm}
+	return d.wellformed(false, false)
+}
+
+// NewDecoder returns a new decoder that reads from r using dm DecMode.
+func (dm *decMode) NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{r: r, d: decoder{dm: dm}}
+}
+
+type decoder struct {
+	data []byte
+	off  int // next read offset in data
+	dm   *decMode
+
+	// expectedLaterEncodingTags stores a stack of encountered "Expected Later Encoding" tags,
+	// if any.
+	//
+	// The "Expected Later Encoding" tags (21 to 23) are valid for any data item. When decoding
+	// byte strings, the effective encoding comes from the tag nearest to the byte string being
+	// decoded. For example, the effective encoding of the byte string 21(22(h'41')) would be
+	// controlled by tag 22, and in the data item 23(h'42', 22([21(h'43')])) the effective
+	// encoding of the byte strings h'42' and h'43' would be controlled by tag 23 and 21,
+	// respectively.
+	expectedLaterEncodingTags []uint64
+}
+
+// value decodes CBOR data item into the value pointed to by v.
+// If CBOR data item fails to be decoded into v,
+// error is returned and offset is moved to the next CBOR data item.
+// Precondition: d.data contains at least one well-formed CBOR data item.
+func (d *decoder) value(v interface{}) error {
+	// v can't be nil, non-pointer, or nil pointer value.
+	if v == nil {
+		return &InvalidUnmarshalError{"cbor: Unmarshal(nil)"}
+	}
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr {
+		return &InvalidUnmarshalError{"cbor: Unmarshal(non-pointer " + rv.Type().String() + ")"}
+	} else if rv.IsNil() {
+		return &InvalidUnmarshalError{"cbor: Unmarshal(nil " + rv.Type().String() + ")"}
+	}
+	rv = rv.Elem()
+	return d.parseToValue(rv, getTypeInfo(rv.Type()))
+}
+
+// parseToValue decodes CBOR data to value. It assumes data is well-formed,
+// and does not perform bounds checking.
+func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo
+
+	// Decode CBOR nil or CBOR undefined to pointer value by setting pointer value to nil.
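+	// Editor's note (hedged): e.g. unmarshalling 0xf6 (null) into a non-nil
+	// *int sets the pointer itself to nil rather than zeroing the pointee.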
+ if d.nextCBORNil() && v.Kind() == reflect.Ptr { + d.skip() + v.Set(reflect.Zero(v.Type())) + return nil + } + + if tInfo.spclType == specialTypeIface { + if !v.IsNil() { + // Use value type + v = v.Elem() + tInfo = getTypeInfo(v.Type()) + } else { //nolint:gocritic + // Create and use registered type if CBOR data is registered tag + if d.dm.tags != nil && d.nextCBORType() == cborTypeTag { + + off := d.off + var tagNums []uint64 + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + tagNums = append(tagNums, tagNum) + } + d.off = off + + registeredType := d.dm.tags.getTypeFromTagNum(tagNums) + if registeredType != nil { + if registeredType.Implements(tInfo.nonPtrType) || + reflect.PtrTo(registeredType).Implements(tInfo.nonPtrType) { + v.Set(reflect.New(registeredType)) + v = v.Elem() + tInfo = getTypeInfo(registeredType) + } + } + } + } + } + + // Create new value for the pointer v to point to. + // At this point, CBOR value is not nil/undefined if v is a pointer. + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + d.skip() + return errors.New("cbor: cannot set new value for " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + + // Strip self-described CBOR tag number. + for d.nextCBORType() == cborTypeTag { + off := d.off + _, _, tagNum := d.getHead() + if tagNum != tagNumSelfDescribedCBOR { + d.off = off + break + } + } + + // Check validity of supported built-in tags. + off := d.off + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil { + d.skip() + return err + } + } + d.off = off + + if tInfo.spclType != specialTypeNone { + switch tInfo.spclType { + case specialTypeEmptyIface: + iv, err := d.parse(false) // Skipped self-described CBOR tag number already. + if iv != nil { + v.Set(reflect.ValueOf(iv)) + } + return err + + case specialTypeTag: + return d.parseToTag(v) + + case specialTypeTime: + if d.nextCBORNil() { + // Decoding CBOR null and undefined to time.Time is no-op. + d.skip() + return nil + } + tm, ok, err := d.parseToTime() + if err != nil { + return err + } + if ok { + v.Set(reflect.ValueOf(tm)) + } + return nil + + case specialTypeUnmarshalerIface: + return d.parseToUnmarshaler(v) + } + } + + // Check registered tag number + if tagItem := d.getRegisteredTagItem(tInfo.nonPtrType); tagItem != nil { + t := d.nextCBORType() + if t != cborTypeTag { + if tagItem.opts.DecTag == DecTagRequired { + d.skip() // Required tag number is absent, skip entire tag + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.typ.String(), + errorMsg: "expect CBOR tag value"} + } + } else if err := d.validRegisteredTagNums(tagItem); err != nil { + d.skip() // Skip tag content + return err + } + } + + t := d.nextCBORType() + + switch t { + case cborTypePositiveInt: + _, _, val := d.getHead() + return fillPositiveInt(t, val, v) + + case cborTypeNegativeInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + // CBOR negative integer overflows int64, use big.Int to store value. 
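+			// Editor's note: CBOR major type 1 encodes the value -1 - val, so the
+			// big.Int below is built as -(val + 1); e.g. val = 2^64-1 represents
+			// -2^64, far outside int64's range.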
+ bi := new(big.Int) + bi.SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows Go's int64", + } + } + nValue := int64(-1) ^ int64(val) + return fillNegativeInt(t, nValue, v) + + case cborTypeByteString: + b, copied := d.parseByteString() + b, converted, err := d.applyByteStringTextConversion(b, v.Type()) + if err != nil { + return err + } + copied = copied || converted + return fillByteString(t, b, !copied, v, d.dm.byteStringToString, d.dm.binaryUnmarshaler) + + case cborTypeTextString: + b, err := d.parseTextString() + if err != nil { + return err + } + return fillTextString(t, b, v) + + case cborTypePrimitives: + _, ai, val := d.getHead() + switch ai { + case additionalInformationAsFloat16: + f := float64(float16.Frombits(uint16(val)).Float32()) + return fillFloat(t, f, v) + + case additionalInformationAsFloat32: + f := float64(math.Float32frombits(uint32(val))) + return fillFloat(t, f, v) + + case additionalInformationAsFloat64: + f := math.Float64frombits(val) + return fillFloat(t, f, v) + + default: // ai <= 24 + if d.dm.simpleValues.rejected[SimpleValue(val)] { + return &UnacceptableDataItemError{ + CBORType: t.String(), + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + } + } + + switch ai { + case additionalInformationAsFalse, + additionalInformationAsTrue: + return fillBool(t, ai == additionalInformationAsTrue, v) + + case additionalInformationAsNull, + additionalInformationAsUndefined: + return fillNil(t, v) + + default: + return fillPositiveInt(t, val, v) + } + } + + case cborTypeTag: + _, _, tagNum := d.getHead() + switch tagNum { + case tagNumUnsignedBignum: + // Bignum (tag 2) can be decoded to uint, int, float, slice, array, or big.Int. + b, copied := d.parseByteString() + bi := new(big.Int).SetBytes(b) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array { + return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler) + } + if bi.IsUint64() { + return fillPositiveInt(t, bi.Uint64(), v) + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows " + v.Type().String(), + } + + case tagNumNegativeBignum: + // Bignum (tag 3) can be decoded to int, float, slice, array, or big.Int. + b, copied := d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array { + return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler) + } + if bi.IsInt64() { + return fillNegativeInt(t, bi.Int64(), v) + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows " + v.Type().String(), + } + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // If conversion for interoperability with text encodings is not configured, + // treat tags 21-23 as unregistered tags. 
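+			// Editor's note: when conversion is enabled, the tag number is pushed
+			// onto the expectedLaterEncodingTags stack for the duration of decoding
+			// this tag's content; the deferred pop keeps the stack balanced for
+			// sibling data items.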
+ if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone { + d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum) + defer func() { + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1] + }() + } + } + + return d.parseToValue(v, tInfo) + + case cborTypeArray: + if tInfo.nonPtrKind == reflect.Slice { + return d.parseArrayToSlice(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Array { + return d.parseArrayToArray(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Struct { + return d.parseArrayToStruct(v, tInfo) + } + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} + + case cborTypeMap: + if tInfo.nonPtrKind == reflect.Struct { + return d.parseMapToStruct(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Map { + return d.parseMapToMap(v, tInfo) + } + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} + } + + return nil +} + +func (d *decoder) parseToTag(v reflect.Value) error { + if d.nextCBORNil() { + // Decoding CBOR null and undefined to cbor.Tag is no-op. + d.skip() + return nil + } + + t := d.nextCBORType() + if t != cborTypeTag { + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: typeTag.String()} + } + + // Unmarshal tag number + _, _, num := d.getHead() + + // Unmarshal tag content + content, err := d.parse(false) + if err != nil { + return err + } + + v.Set(reflect.ValueOf(Tag{num, content})) + return nil +} + +// parseToTime decodes the current data item as a time.Time. The bool return value is false if and +// only if the destination value should remain unmodified. +func (d *decoder) parseToTime() (time.Time, bool, error) { + // Verify that tag number or absence of tag number is acceptable to specified timeTag. 
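+	// Editor's illustration (hedged): with DecTagRequired, an untagged text
+	// string such as "2000-01-01T00:00:00Z" fails with UnmarshalTypeError,
+	// while the same string enclosed in tag 0 decodes successfully.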
+ if t := d.nextCBORType(); t == cborTypeTag { + if d.dm.timeTag == DecTagIgnored { + // Skip all enclosing tags + for t == cborTypeTag { + d.getHead() + t = d.nextCBORType() + } + if d.nextCBORNil() { + d.skip() + return time.Time{}, false, nil + } + } else { + // Read tag number + _, _, tagNum := d.getHead() + if tagNum != 0 && tagNum != 1 { + d.skip() // skip tag content + return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1") + } + } + } else { + if d.dm.timeTag == DecTagRequired { + d.skip() + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String(), errorMsg: "expect CBOR tag value"} + } + } + + switch t := d.nextCBORType(); t { + case cborTypeByteString: + if d.dm.byteStringToTime == ByteStringToTimeAllowed { + b, _ := d.parseByteString() + t, err := time.Parse(time.RFC3339, string(b)) + if err != nil { + return time.Time{}, false, fmt.Errorf("cbor: cannot set %q for time.Time: %w", string(b), err) + } + return t, true, nil + } + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + + case cborTypeTextString: + s, err := d.parseTextString() + if err != nil { + return time.Time{}, false, err + } + t, err := time.Parse(time.RFC3339, string(s)) + if err != nil { + return time.Time{}, false, errors.New("cbor: cannot set " + string(s) + " for time.Time: " + err.Error()) + } + return t, true, nil + + case cborTypePositiveInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("%d overflows Go's int64", val), + } + } + return time.Unix(int64(val), 0), true, nil + + case cborTypeNegativeInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + if val == math.MaxUint64 { + // Maximum absolute value representable by negative integer is 2^64, + // not 2^64-1, so it overflows uint64. + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: "-18446744073709551616 overflows Go's int64", + } + } + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("-%d overflows Go's int64", val+1), + } + } + return time.Unix(int64(-1)^int64(val), 0), true, nil + + case cborTypePrimitives: + _, ai, val := d.getHead() + var f float64 + switch ai { + case additionalInformationAsFloat16: + f = float64(float16.Frombits(uint16(val)).Float32()) + + case additionalInformationAsFloat32: + f = float64(math.Float32frombits(uint32(val))) + + case additionalInformationAsFloat64: + f = math.Float64frombits(val) + + default: + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + } + + if math.IsNaN(f) || math.IsInf(f, 0) { + // https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.2-6 + return time.Time{}, true, nil + } + seconds, fractional := math.Modf(f) + return time.Unix(int64(seconds), int64(fractional*1e9)), true, nil + + default: + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + } +} + +// parseToUnmarshaler parses CBOR data to value implementing Unmarshaler interface. +// It assumes data is well-formed, and does not perform bounds checking. 
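+//
+// Editor's sketch of a typical implementer (hedged; Celsius is a made-up
+// example type, not part of this package):
+//
+//	type Celsius float64
+//
+//	func (c *Celsius) UnmarshalCBOR(data []byte) error {
+//		var f float64
+//		if err := cbor.Unmarshal(data, &f); err != nil {
+//			return err
+//		}
+//		*c = Celsius(f)
+//		return nil
+//	}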
+func (d *decoder) parseToUnmarshaler(v reflect.Value) error { + if d.nextCBORNil() && v.Kind() == reflect.Ptr && v.IsNil() { + d.skip() + return nil + } + + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + } + if u, ok := v.Interface().(Unmarshaler); ok { + start := d.off + d.skip() + return u.UnmarshalCBOR(d.data[start:d.off]) + } + d.skip() + return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.Unmarshaler") +} + +// parse parses CBOR data and returns value in default Go type. +// It assumes data is well-formed, and does not perform bounds checking. +func (d *decoder) parse(skipSelfDescribedTag bool) (interface{}, error) { //nolint:gocyclo + // Strip self-described CBOR tag number. + if skipSelfDescribedTag { + for d.nextCBORType() == cborTypeTag { + off := d.off + _, _, tagNum := d.getHead() + if tagNum != tagNumSelfDescribedCBOR { + d.off = off + break + } + } + } + + // Check validity of supported built-in tags. + off := d.off + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil { + d.skip() + return nil, err + } + } + d.off = off + + t := d.nextCBORType() + switch t { + case cborTypePositiveInt: + _, _, val := d.getHead() + + switch d.dm.intDec { + case IntDecConvertNone: + return val, nil + + case IntDecConvertSigned, IntDecConvertSignedOrFail: + if val > math.MaxInt64 { + return nil, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + + return int64(val), nil + + case IntDecConvertSignedOrBigInt: + if val > math.MaxInt64 { + bi := new(big.Int).SetUint64(val) + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + } + + return int64(val), nil + + default: + // not reachable + } + + case cborTypeNegativeInt: + _, _, val := d.getHead() + + if val > math.MaxInt64 { + // CBOR negative integer value overflows Go int64, use big.Int instead. + bi := new(big.Int).SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if d.dm.intDec == IntDecConvertSignedOrFail { + return nil, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: bi.String() + " overflows Go's int64", + } + } + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + } + + nValue := int64(-1) ^ int64(val) + return nValue, nil + + case cborTypeByteString: + b, copied := d.parseByteString() + var effectiveByteStringType = d.dm.defaultByteStringType + if effectiveByteStringType == nil { + effectiveByteStringType = typeByteSlice + } + b, converted, err := d.applyByteStringTextConversion(b, effectiveByteStringType) + if err != nil { + return nil, err + } + copied = copied || converted + + switch effectiveByteStringType { + case typeByteSlice: + if copied { + return b, nil + } + clone := make([]byte, len(b)) + copy(clone, b) + return clone, nil + + case typeString: + return string(b), nil + + default: + if copied || d.dm.defaultByteStringType.Kind() == reflect.String { + // Avoid an unnecessary copy since the conversion to string must + // copy the underlying bytes. 
+ return reflect.ValueOf(b).Convert(d.dm.defaultByteStringType).Interface(), nil + } + clone := make([]byte, len(b)) + copy(clone, b) + return reflect.ValueOf(clone).Convert(d.dm.defaultByteStringType).Interface(), nil + } + + case cborTypeTextString: + b, err := d.parseTextString() + if err != nil { + return nil, err + } + return string(b), nil + + case cborTypeTag: + tagOff := d.off + _, _, tagNum := d.getHead() + contentOff := d.off + + switch tagNum { + case tagNumRFC3339Time, tagNumEpochTime: + d.off = tagOff + tm, _, err := d.parseToTime() + if err != nil { + return nil, err + } + + switch d.dm.timeTagToAny { + case TimeTagToTime: + return tm, nil + + case TimeTagToRFC3339: + if tagNum == 1 { + tm = tm.UTC() + } + // Call time.MarshalText() to format decoded time to RFC3339 format, + // and return error on time value that cannot be represented in + // RFC3339 format. E.g. year cannot exceed 9999, etc. + text, err := tm.Truncate(time.Second).MarshalText() + if err != nil { + return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format: %v", err) + } + return string(text), nil + + case TimeTagToRFC3339Nano: + if tagNum == 1 { + tm = tm.UTC() + } + // Call time.MarshalText() to format decoded time to RFC3339 format, + // and return error on time value that cannot be represented in + // RFC3339 format with sub-second precision. + text, err := tm.MarshalText() + if err != nil { + return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format with sub-second precision: %v", err) + } + return string(text), nil + + default: + // not reachable + } + + case tagNumUnsignedBignum: + b, _ := d.parseByteString() + bi := new(big.Int).SetBytes(b) + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + + case tagNumNegativeBignum: + b, _ := d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // If conversion for interoperability with text encodings is not configured, + // treat tags 21-23 as unregistered tags. + if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || + d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone { + d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum) + defer func() { + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1] + }() + return d.parse(false) + } + } + + if d.dm.tags != nil { + // Parse to specified type if tag number is registered. 
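+		// Editor's note: nested tag numbers are collected first so that a TagSet
+		// registration covering a multi-tag prefix can match; on a match the
+		// offset is rewound to the outermost tag and the content is re-parsed
+		// into the registered Go type.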
+ tagNums := []uint64{tagNum} + for d.nextCBORType() == cborTypeTag { + _, _, num := d.getHead() + tagNums = append(tagNums, num) + } + registeredType := d.dm.tags.getTypeFromTagNum(tagNums) + if registeredType != nil { + d.off = tagOff + rv := reflect.New(registeredType) + if err := d.parseToValue(rv.Elem(), getTypeInfo(registeredType)); err != nil { + return nil, err + } + return rv.Elem().Interface(), nil + } + } + + // Parse tag content + d.off = contentOff + content, err := d.parse(false) + if err != nil { + return nil, err + } + if d.dm.unrecognizedTagToAny == UnrecognizedTagContentToAny { + return content, nil + } + return Tag{tagNum, content}, nil + + case cborTypePrimitives: + _, ai, val := d.getHead() + if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] { + return nil, &UnacceptableDataItemError{ + CBORType: t.String(), + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + } + } + if ai < 20 || ai == 24 { + return SimpleValue(val), nil + } + + switch ai { + case additionalInformationAsFalse, + additionalInformationAsTrue: + return (ai == additionalInformationAsTrue), nil + + case additionalInformationAsNull, + additionalInformationAsUndefined: + return nil, nil + + case additionalInformationAsFloat16: + f := float64(float16.Frombits(uint16(val)).Float32()) + return f, nil + + case additionalInformationAsFloat32: + f := float64(math.Float32frombits(uint32(val))) + return f, nil + + case additionalInformationAsFloat64: + f := math.Float64frombits(val) + return f, nil + } + + case cborTypeArray: + return d.parseArray() + + case cborTypeMap: + if d.dm.defaultMapType != nil { + m := reflect.New(d.dm.defaultMapType) + err := d.parseToValue(m, getTypeInfo(m.Elem().Type())) + if err != nil { + return nil, err + } + return m.Elem().Interface(), nil + } + return d.parseMap() + } + + return nil, nil +} + +// parseByteString parses a CBOR encoded byte string. The returned byte slice +// may be backed directly by the input. The second return value will be true if +// and only if the slice is backed by a copy of the input. Callers are +// responsible for making a copy if necessary. +func (d *decoder) parseByteString() ([]byte, bool) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + if !indefiniteLength { + b := d.data[d.off : d.off+int(val)] + d.off += int(val) + return b, false + } + // Process indefinite length string chunks. + b := []byte{} + for !d.foundBreak() { + _, _, val = d.getHead() + b = append(b, d.data[d.off:d.off+int(val)]...) + d.off += int(val) + } + return b, true +} + +// applyByteStringTextConversion converts bytes read from a byte string to or from a configured text +// encoding. If no transformation was performed (because it was not required), the original byte +// slice is returned and the bool return value is false. Otherwise, a new slice containing the +// converted bytes is returned along with the bool value true. 
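+//
+// For example (illustrative, user-side sketch): with
+//
+//	dm, _ := cbor.DecOptions{
+//		ByteStringExpectedFormat: cbor.ByteStringExpectedBase64,
+//	}.DecMode()
+//
+// decoding a byte string whose content is the text "AQI=" into a []byte
+// yields {0x01, 0x02}: the base64 text is decoded here before assignment.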
+func (d *decoder) applyByteStringTextConversion( + src []byte, + dstType reflect.Type, +) ( + dst []byte, + transformed bool, + err error, +) { + switch dstType.Kind() { + case reflect.String: + if d.dm.byteStringToString != ByteStringToStringAllowedWithExpectedLaterEncoding || len(d.expectedLaterEncodingTags) == 0 { + return src, false, nil + } + + switch d.expectedLaterEncodingTags[len(d.expectedLaterEncodingTags)-1] { + case tagNumExpectedLaterEncodingBase64URL: + encoded := make([]byte, base64.RawURLEncoding.EncodedLen(len(src))) + base64.RawURLEncoding.Encode(encoded, src) + return encoded, true, nil + + case tagNumExpectedLaterEncodingBase64: + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(src))) + base64.StdEncoding.Encode(encoded, src) + return encoded, true, nil + + case tagNumExpectedLaterEncodingBase16: + encoded := make([]byte, hex.EncodedLen(len(src))) + hex.Encode(encoded, src) + return encoded, true, nil + + default: + // If this happens, there is a bug: the decoder has pushed an invalid + // "expected later encoding" tag to the stack. + panic(fmt.Sprintf("unrecognized expected later encoding tag: %d", d.expectedLaterEncodingTags)) + } + + case reflect.Slice: + if dstType.Elem().Kind() != reflect.Uint8 || len(d.expectedLaterEncodingTags) > 0 { + // Either the destination is not a slice of bytes, or the encoder that + // produced the input indicated an expected text encoding tag and therefore + // the content of the byte string has NOT been text encoded. + return src, false, nil + } + + switch d.dm.byteStringExpectedFormat { + case ByteStringExpectedBase64URL: + decoded := make([]byte, base64.RawURLEncoding.DecodedLen(len(src))) + n, err := base64.RawURLEncoding.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64URL, err) + } + return decoded[:n], true, nil + + case ByteStringExpectedBase64: + decoded := make([]byte, base64.StdEncoding.DecodedLen(len(src))) + n, err := base64.StdEncoding.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64, err) + } + return decoded[:n], true, nil + + case ByteStringExpectedBase16: + decoded := make([]byte, hex.DecodedLen(len(src))) + n, err := hex.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase16, err) + } + return decoded[:n], true, nil + } + } + + return src, false, nil +} + +// parseTextString parses CBOR encoded text string. It returns a byte slice +// to prevent creating an extra copy of string. Caller should wrap returned +// byte slice as string when needed. +func (d *decoder) parseTextString() ([]byte, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + if !indefiniteLength { + b := d.data[d.off : d.off+int(val)] + d.off += int(val) + if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(b) { + return nil, &SemanticError{"cbor: invalid UTF-8 string"} + } + return b, nil + } + // Process indefinite length string chunks. + b := []byte{} + for !d.foundBreak() { + _, _, val = d.getHead() + x := d.data[d.off : d.off+int(val)] + d.off += int(val) + if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(x) { + for !d.foundBreak() { + d.skip() // Skip remaining chunk on error + } + return nil, &SemanticError{"cbor: invalid UTF-8 string"} + } + b = append(b, x...) 
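+		// Worked example (illustrative): the indefinite-length text string
+		//
+		//	0x7f, 0x62, 'h', 'e', 0x62, 'l', 'l', 0x61, 'o', 0xff
+		//
+		// arrives as the chunks "he", "ll", "o" and is concatenated here
+		// into "hello".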
+ } + return b, nil +} + +func (d *decoder) parseArray() ([]interface{}, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance + } + v := make([]interface{}, count) + var e interface{} + var err, lastErr error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + if e, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + v[i] = e + } + return v, err +} + +func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance + } + if v.IsNil() || v.Cap() < count || count == 0 { + v.Set(reflect.MakeSlice(tInfo.nonPtrType, count, count)) + } + v.SetLen(count) + var err error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + if lastErr := d.parseToValue(v.Index(i), tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + } + } + return err +} + +func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + gi := 0 + vLen := v.Len() + var err error + for ci := 0; (hasSize && ci < count) || (!hasSize && !d.foundBreak()); ci++ { + if gi < vLen { + // Read CBOR array element and set array element + if lastErr := d.parseToValue(v.Index(gi), tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + } + gi++ + } else { + d.skip() // Skip remaining CBOR array element + } + } + // Set remaining Go array elements to zero values. + if gi < vLen { + zeroV := reflect.Zero(tInfo.elemTypeInfo.typ) + for ; gi < vLen; gi++ { + v.Index(gi).Set(zeroV) + } + } + return err +} + +func (d *decoder) parseMap() (interface{}, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + m := make(map[interface{}]interface{}) + var k, e interface{} + var err, lastErr error + keyCount := 0 + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + // Parse CBOR map key. + if k, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() + continue + } + + // Detect if CBOR map key can be used as Go map key. + rv := reflect.ValueOf(k) + if !isHashableValue(rv) { + var converted bool + if d.dm.mapKeyByteString == MapKeyByteStringAllowed { + k, converted = convertByteSliceToByteString(k) + } + if !converted { + if err == nil { + err = &InvalidMapKeyTypeError{rv.Type().String()} + } + d.skip() + continue + } + } + + // Parse CBOR map value. + if e, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + + // Add key-value pair to Go map. + m[k] = e + + // Detect duplicate map key. 
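+		// For example (illustrative, user-side sketch):
+		//
+		//	dm, _ := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()
+		//	var v interface{}
+		//	err := dm.Unmarshal([]byte{0xa2, 0x01, 0x02, 0x01, 0x03}, &v) // {1: 2, 1: 3}
+		//
+		// err is a *DupMapKeyError because key 1 appears twice.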
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + newKeyCount := len(m) + if newKeyCount == keyCount { + m[k] = nil + err = &DupMapKeyError{k, i} + i++ + // skip the rest of the map + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() // Skip map key + d.skip() // Skip map value + } + return m, err + } + keyCount = newKeyCount + } + } + return m, err +} + +func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if v.IsNil() { + mapsize := count + if !hasSize { + mapsize = 0 + } + v.Set(reflect.MakeMapWithSize(tInfo.nonPtrType, mapsize)) + } + keyType, eleType := tInfo.keyTypeInfo.typ, tInfo.elemTypeInfo.typ + reuseKey, reuseEle := isImmutableKind(tInfo.keyTypeInfo.kind), isImmutableKind(tInfo.elemTypeInfo.kind) + var keyValue, eleValue, zeroKeyValue, zeroEleValue reflect.Value + keyIsInterfaceType := keyType == typeIntf // If key type is interface{}, need to check if key value is hashable. + var err, lastErr error + keyCount := v.Len() + var existingKeys map[interface{}]bool // Store existing map keys, used for detecting duplicate map key. + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + existingKeys = make(map[interface{}]bool, keyCount) + if keyCount > 0 { + vKeys := v.MapKeys() + for i := 0; i < len(vKeys); i++ { + existingKeys[vKeys[i].Interface()] = true + } + } + } + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + // Parse CBOR map key. + if !keyValue.IsValid() { + keyValue = reflect.New(keyType).Elem() + } else if !reuseKey { + if !zeroKeyValue.IsValid() { + zeroKeyValue = reflect.Zero(keyType) + } + keyValue.Set(zeroKeyValue) + } + if lastErr = d.parseToValue(keyValue, tInfo.keyTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() + continue + } + + // Detect if CBOR map key can be used as Go map key. + if keyIsInterfaceType && keyValue.Elem().IsValid() { + if !isHashableValue(keyValue.Elem()) { + var converted bool + if d.dm.mapKeyByteString == MapKeyByteStringAllowed { + var k interface{} + k, converted = convertByteSliceToByteString(keyValue.Elem().Interface()) + if converted { + keyValue.Set(reflect.ValueOf(k)) + } + } + if !converted { + if err == nil { + err = &InvalidMapKeyTypeError{keyValue.Elem().Type().String()} + } + d.skip() + continue + } + } + } + + // Parse CBOR map value. + if !eleValue.IsValid() { + eleValue = reflect.New(eleType).Elem() + } else if !reuseEle { + if !zeroEleValue.IsValid() { + zeroEleValue = reflect.Zero(eleType) + } + eleValue.Set(zeroEleValue) + } + if lastErr := d.parseToValue(eleValue, tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + + // Add key-value pair to Go map. + v.SetMapIndex(keyValue, eleValue) + + // Detect duplicate map key. 
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + newKeyCount := v.Len() + if newKeyCount == keyCount { + kvi := keyValue.Interface() + if !existingKeys[kvi] { + v.SetMapIndex(keyValue, reflect.New(eleType).Elem()) + err = &DupMapKeyError{kvi, i} + i++ + // skip the rest of the map + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() // skip map key + d.skip() // skip map value + } + return err + } + delete(existingKeys, kvi) + } + keyCount = newKeyCount + } + } + return err +} + +func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { + structType := getDecodingStructType(tInfo.nonPtrType) + if structType.err != nil { + return structType.err + } + + if !structType.toArray { + t := d.nextCBORType() + d.skip() + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: "cannot decode CBOR array to struct without toarray option", + } + } + + start := d.off + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size + } + if count != len(structType.fields) { + d.off = start + d.skip() + return &UnmarshalTypeError{ + CBORType: cborTypeArray.String(), + GoType: tInfo.typ.String(), + errorMsg: "cannot decode CBOR array to struct with different number of elements", + } + } + var err, lastErr error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + f := structType.fields[i] + + // Get field value by index + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if lastErr != nil && err == nil { + err = lastErr + } + if !fv.IsValid() { + d.skip() + continue + } + } + + if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { + if err == nil { + if typeError, ok := lastErr.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.typ.String() + "." + f.name + err = typeError + } else { + err = lastErr + } + } + } + } + return err +} + +// parseMapToStruct needs to be fast so gocyclo can be ignored for now. +func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + structType := getDecodingStructType(tInfo.nonPtrType) + if structType.err != nil { + return structType.err + } + + if structType.toArray { + t := d.nextCBORType() + d.skip() + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: "cannot decode CBOR map to struct with toarray option", + } + } + + var err, lastErr error + + // Get CBOR map size + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + + // Keeps track of matched struct fields + var foundFldIdx []bool + { + const maxStackFields = 128 + if nfields := len(structType.fields); nfields <= maxStackFields { + // For structs with typical field counts, expect that this can be + // stack-allocated. 
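+			// (Illustrative note: a local [128]bool array occupies 128 bytes,
+			// and slicing a local array that does not escape lets the compiler
+			// keep it on the stack instead of calling make.)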
+ var a [maxStackFields]bool + foundFldIdx = a[:nfields] + } else { + foundFldIdx = make([]bool, len(structType.fields)) + } + } + + // Keeps track of CBOR map keys to detect duplicate map key + keyCount := 0 + var mapKeys map[interface{}]struct{} + + errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0 + +MapEntryLoop: + for j := 0; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + var f *field + + // If duplicate field detection is enabled and the key at index j did not match any + // field, k will hold the map key. + var k interface{} + + t := d.nextCBORType() + if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) { + var keyBytes []byte + if t == cborTypeTextString { + keyBytes, lastErr = d.parseTextString() + if lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() // skip value + continue + } + } else { // cborTypeByteString + keyBytes, _ = d.parseByteString() + } + + // Check for exact match on field name. + if i, ok := structType.fieldIndicesByName[string(keyBytes)]; ok { + fld := structType.fields[i] + + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{fld.name, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + } + + // Find field with case-insensitive match + if f == nil && d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive { + keyLen := len(keyBytes) + keyString := string(keyBytes) + for i := 0; i < len(structType.fields); i++ { + fld := structType.fields[i] + if len(fld.name) == keyLen && strings.EqualFold(fld.name, keyString) { + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{keyString, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + break + } + } + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { + k = string(keyBytes) + } + } else if t <= cborTypeNegativeInt { // uint/int + var nameAsInt int64 + + if t == cborTypePositiveInt { + _, _, val := d.getHead() + nameAsInt = int64(val) + } else { + _, _, val := d.getHead() + if val > math.MaxInt64 { + if err == nil { + err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: "-1-" + strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + d.skip() // skip value + continue + } + nameAsInt = int64(-1) ^ int64(val) + } + + // Find field + for i := 0; i < len(structType.fields); i++ { + fld := structType.fields[i] + if fld.keyAsInt && fld.nameAsInt == nameAsInt { + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{nameAsInt, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + break + } + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { + k = nameAsInt + } + } else { + if err == nil { + 
err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf("").String(), + errorMsg: "map key is of type " + t.String() + " and cannot be used to match struct field name", + } + } + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + // parse key + k, lastErr = d.parse(true) + if lastErr != nil { + d.skip() // skip value + continue + } + // Detect if CBOR map key can be used as Go map key. + if !isHashableValue(reflect.ValueOf(k)) { + d.skip() // skip value + continue + } + } else { + d.skip() // skip key + } + } + + if f == nil { + if errOnUnknownField { + err = &UnknownFieldError{j} + d.skip() // Skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } + + // Two map keys that match the same struct field are immediately considered + // duplicates. This check detects duplicates between two map keys that do + // not match a struct field. If unknown field errors are enabled, then this + // check is never reached. + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + if mapKeys == nil { + mapKeys = make(map[interface{}]struct{}, 1) + } + mapKeys[k] = struct{}{} + newKeyCount := len(mapKeys) + if newKeyCount == keyCount { + err = &DupMapKeyError{k, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } + keyCount = newKeyCount + } + + d.skip() // Skip value + continue + } + + // Get field value by index + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if lastErr != nil && err == nil { + err = lastErr + } + if !fv.IsValid() { + d.skip() + continue + } + } + + if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { + if err == nil { + if typeError, ok := lastErr.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.nonPtrType.String() + "." + f.name + err = typeError + } else { + err = lastErr + } + } + } + } + return err +} + +// validRegisteredTagNums verifies that tag numbers match registered tag numbers of type t. +// validRegisteredTagNums assumes next CBOR data type is tag. It scans all tag numbers, and stops at tag content. +func (d *decoder) validRegisteredTagNums(registeredTag *tagItem) error { + // Scan until next cbor data is tag content. + tagNums := make([]uint64, 0, 1) + for d.nextCBORType() == cborTypeTag { + _, _, val := d.getHead() + tagNums = append(tagNums, val) + } + + if !registeredTag.equalTagNum(tagNums) { + return &WrongTagError{registeredTag.contentType, registeredTag.num, tagNums} + } + return nil +} + +func (d *decoder) getRegisteredTagItem(vt reflect.Type) *tagItem { + if d.dm.tags != nil { + return d.dm.tags.getTagItemFromType(vt) + } + return nil +} + +// skip moves data offset to the next item. skip assumes data is well-formed, +// and does not perform bounds checking. 
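+//
+// For example (illustrative): given data starting with 0x82 0x01 0x02 (the
+// array [1, 2]), skip reads the array head and then skips both elements,
+// advancing the offset by 3 bytes in total.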
+func (d *decoder) skip() { + t, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + + if indefiniteLength { + switch t { + case cborTypeByteString, cborTypeTextString, cborTypeArray, cborTypeMap: + for { + if isBreakFlag(d.data[d.off]) { + d.off++ + return + } + d.skip() + } + } + } + + switch t { + case cborTypeByteString, cborTypeTextString: + d.off += int(val) + + case cborTypeArray: + for i := 0; i < int(val); i++ { + d.skip() + } + + case cborTypeMap: + for i := 0; i < int(val)*2; i++ { + d.skip() + } + + case cborTypeTag: + d.skip() + } +} + +func (d *decoder) getHeadWithIndefiniteLengthFlag() ( + t cborType, + ai byte, + val uint64, + indefiniteLength bool, +) { + t, ai, val = d.getHead() + indefiniteLength = additionalInformation(ai).isIndefiniteLength() + return +} + +// getHead assumes data is well-formed, and does not perform bounds checking. +func (d *decoder) getHead() (t cborType, ai byte, val uint64) { + t, ai = parseInitialByte(d.data[d.off]) + val = uint64(ai) + d.off++ + + if ai <= maxAdditionalInformationWithoutArgument { + return + } + + if ai == additionalInformationWith1ByteArgument { + val = uint64(d.data[d.off]) + d.off++ + return + } + + if ai == additionalInformationWith2ByteArgument { + const argumentSize = 2 + val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + return + } + + if ai == additionalInformationWith4ByteArgument { + const argumentSize = 4 + val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + return + } + + if ai == additionalInformationWith8ByteArgument { + const argumentSize = 8 + val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize]) + d.off += argumentSize + return + } + return +} + +func (d *decoder) numOfItemsUntilBreak() int { + savedOff := d.off + i := 0 + for !d.foundBreak() { + d.skip() + i++ + } + d.off = savedOff + return i +} + +// foundBreak returns true if next byte is CBOR break code and moves cursor by 1, +// otherwise it returns false. +// foundBreak assumes data is well-formed, and does not perform bounds checking. 
+func (d *decoder) foundBreak() bool { + if isBreakFlag(d.data[d.off]) { + d.off++ + return true + } + return false +} + +func (d *decoder) reset(data []byte) { + d.data = data + d.off = 0 + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:0] +} + +func (d *decoder) nextCBORType() cborType { + return getType(d.data[d.off]) +} + +func (d *decoder) nextCBORNil() bool { + return d.data[d.off] == 0xf6 || d.data[d.off] == 0xf7 +} + +var ( + typeIntf = reflect.TypeOf([]interface{}(nil)).Elem() + typeTime = reflect.TypeOf(time.Time{}) + typeBigInt = reflect.TypeOf(big.Int{}) + typeUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + typeBinaryUnmarshaler = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + typeString = reflect.TypeOf("") + typeByteSlice = reflect.TypeOf([]byte(nil)) +) + +func fillNil(_ cborType, v reflect.Value) error { + switch v.Kind() { + case reflect.Slice, reflect.Map, reflect.Interface, reflect.Ptr: + v.Set(reflect.Zero(v.Type())) + return nil + } + return nil +} + +func fillPositiveInt(t cborType, val uint64, v reflect.Value) error { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if val > math.MaxInt64 { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + if v.OverflowInt(int64(val)) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetInt(int64(val)) + return nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if v.OverflowUint(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetUint(val) + return nil + + case reflect.Float32, reflect.Float64: + f := float64(val) + v.SetFloat(f) + return nil + } + + if v.Type() == typeBigInt { + i := new(big.Int).SetUint64(val) + v.Set(reflect.ValueOf(*i)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillNegativeInt(t cborType, val int64, v reflect.Value) error { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if v.OverflowInt(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatInt(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetInt(val) + return nil + + case reflect.Float32, reflect.Float64: + f := float64(val) + v.SetFloat(f) + return nil + } + if v.Type() == typeBigInt { + i := new(big.Int).SetInt64(val) + v.Set(reflect.ValueOf(*i)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillBool(t cborType, val bool, v reflect.Value) error { + if v.Kind() == reflect.Bool { + v.SetBool(val) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillFloat(t cborType, val float64, v reflect.Value) error { + switch v.Kind() { + case reflect.Float32, reflect.Float64: + if v.OverflowFloat(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatFloat(val, 'E', -1, 64) + " overflows " + v.Type().String(), + } + } + v.SetFloat(val) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: 
v.Type().String()} +} + +func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode) error { + if bum == BinaryUnmarshalerByteString && reflect.PtrTo(v.Type()).Implements(typeBinaryUnmarshaler) { + if v.CanAddr() { + v = v.Addr() + if u, ok := v.Interface().(encoding.BinaryUnmarshaler); ok { + // The contract of BinaryUnmarshaler forbids + // retaining the input bytes, so no copying is + // required even if val is shared. + return u.UnmarshalBinary(val) + } + } + return errors.New("cbor: cannot set new value for " + v.Type().String()) + } + if bsts != ByteStringToStringForbidden && v.Kind() == reflect.String { + v.SetString(string(val)) + return nil + } + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 { + src := val + if shared { + // SetBytes shares the underlying bytes of the source slice. + src = make([]byte, len(val)) + copy(src, val) + } + v.SetBytes(src) + return nil + } + if v.Kind() == reflect.Array && v.Type().Elem().Kind() == reflect.Uint8 { + vLen := v.Len() + i := 0 + for ; i < vLen && i < len(val); i++ { + v.Index(i).SetUint(uint64(val[i])) + } + // Set remaining Go array elements to zero values. + if i < vLen { + zeroV := reflect.Zero(reflect.TypeOf(byte(0))) + for ; i < vLen; i++ { + v.Index(i).Set(zeroV) + } + } + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillTextString(t cborType, val []byte, v reflect.Value) error { + if v.Kind() == reflect.String { + v.SetString(string(val)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func isImmutableKind(k reflect.Kind) bool { + switch k { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64, + reflect.String: + return true + + default: + return false + } +} + +func isHashableValue(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Slice, reflect.Map, reflect.Func: + return false + + case reflect.Struct: + switch rv.Type() { + case typeTag: + tag := rv.Interface().(Tag) + return isHashableValue(reflect.ValueOf(tag.Content)) + case typeBigInt: + return false + } + } + return true +} + +// convertByteSliceToByteString converts []byte to ByteString if +// - v is []byte type, or +// - v is Tag type and tag content type is []byte +// This function also handles nested tags. +// CBOR data is already verified to be well-formed before this function is used, +// so the recursion won't exceed max nested levels. +func convertByteSliceToByteString(v interface{}) (interface{}, bool) { + switch v := v.(type) { + case []byte: + return ByteString(v), true + + case Tag: + content, converted := convertByteSliceToByteString(v.Content) + if converted { + return Tag{Number: v.Number, Content: content}, true + } + } + return v, false +} diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/diagnose.go b/go-controller/vendor/github.com/fxamacker/cbor/v2/diagnose.go new file mode 100644 index 0000000000..44afb86608 --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/diagnose.go @@ -0,0 +1,724 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+
+package cbor
+
+import (
+	"bytes"
+	"encoding/base32"
+	"encoding/base64"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"math/big"
+	"strconv"
+	"unicode/utf16"
+	"unicode/utf8"
+
+	"github.com/x448/float16"
+)
+
+// DiagMode is the main interface for CBOR diagnostic notation.
+type DiagMode interface {
+	// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using this DiagMode.
+	Diagnose([]byte) (string, error)
+
+	// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
+	DiagnoseFirst([]byte) (string, []byte, error)
+
+	// DiagOptions returns user specified options used to create this DiagMode.
+	DiagOptions() DiagOptions
+}
+
+// ByteStringEncoding specifies the base encoding in which byte strings are notated.
+type ByteStringEncoding uint8
+
+const (
+	// ByteStringBase16Encoding encodes byte strings in base16, without padding.
+	ByteStringBase16Encoding ByteStringEncoding = iota
+
+	// ByteStringBase32Encoding encodes byte strings in base32, without padding.
+	ByteStringBase32Encoding
+
+	// ByteStringBase32HexEncoding encodes byte strings in base32hex, without padding.
+	ByteStringBase32HexEncoding
+
+	// ByteStringBase64Encoding encodes byte strings in base64url, without padding.
+	ByteStringBase64Encoding
+
+	maxByteStringEncoding
+)
+
+func (bse ByteStringEncoding) valid() error {
+	if bse >= maxByteStringEncoding {
+		return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse)))
+	}
+	return nil
+}
+
+// DiagOptions specifies diagnostic notation options.
+type DiagOptions struct {
+	// ByteStringEncoding specifies the base encoding in which byte strings are notated.
+	// Default is ByteStringBase16Encoding.
+	ByteStringEncoding ByteStringEncoding
+
+	// ByteStringHexWhitespace specifies notating a byte string with whitespace between bytes
+	// when ByteStringEncoding is ByteStringBase16Encoding.
+	ByteStringHexWhitespace bool
+
+	// ByteStringText specifies notating a byte string as text
+	// if it is valid UTF-8 text.
+	ByteStringText bool
+
+	// ByteStringEmbeddedCBOR specifies notating a byte string as embedded CBOR
+	// if its content is well-formed CBOR.
+	ByteStringEmbeddedCBOR bool
+
+	// CBORSequence specifies notating CBOR sequences;
+	// otherwise, an error is returned if there are more bytes after the first CBOR data item.
+	CBORSequence bool
+
+	// FloatPrecisionIndicator specifies appending a suffix to indicate float precision.
+	// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-encoding-indicators.
+	FloatPrecisionIndicator bool
+
+	// MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR arrays, maps, and tags.
+	// Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can
+	// require larger amounts of stack to deserialize. Don't increase this higher than you require.
+	MaxNestedLevels int
+
+	// MaxArrayElements specifies the max number of elements for CBOR arrays.
+	// Default is 128*1024=131072 and it can be set to [16, 2147483647].
+	MaxArrayElements int
+
+	// MaxMapPairs specifies the max number of key-value pairs for CBOR maps.
+	// Default is 128*1024=131072 and it can be set to [16, 2147483647].
+	MaxMapPairs int
+}
+
+// DiagMode returns a DiagMode with immutable options.
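+//
+// For example (illustrative, user-side sketch):
+//
+//	dm, _ := cbor.DiagOptions{ByteStringText: true}.DiagMode()
+//	edn, _ := dm.Diagnose([]byte{0x43, 0x61, 0x62, 0x63})
+//	// edn == "'abc'"; without ByteStringText it would be "h'616263'"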
+func (opts DiagOptions) DiagMode() (DiagMode, error) {
+	return opts.diagMode()
+}
+
+func (opts DiagOptions) diagMode() (*diagMode, error) {
+	if err := opts.ByteStringEncoding.valid(); err != nil {
+		return nil, err
+	}
+
+	decMode, err := DecOptions{
+		MaxNestedLevels:  opts.MaxNestedLevels,
+		MaxArrayElements: opts.MaxArrayElements,
+		MaxMapPairs:      opts.MaxMapPairs,
+	}.decMode()
+	if err != nil {
+		return nil, err
+	}
+
+	return &diagMode{
+		byteStringEncoding:      opts.ByteStringEncoding,
+		byteStringHexWhitespace: opts.ByteStringHexWhitespace,
+		byteStringText:          opts.ByteStringText,
+		byteStringEmbeddedCBOR:  opts.ByteStringEmbeddedCBOR,
+		cborSequence:            opts.CBORSequence,
+		floatPrecisionIndicator: opts.FloatPrecisionIndicator,
+		decMode:                 decMode,
+	}, nil
+}
+
+type diagMode struct {
+	byteStringEncoding      ByteStringEncoding
+	byteStringHexWhitespace bool
+	byteStringText          bool
+	byteStringEmbeddedCBOR  bool
+	cborSequence            bool
+	floatPrecisionIndicator bool
+	decMode                 *decMode
+}
+
+// DiagOptions returns user specified options used to create this DiagMode.
+func (dm *diagMode) DiagOptions() DiagOptions {
+	return DiagOptions{
+		ByteStringEncoding:      dm.byteStringEncoding,
+		ByteStringHexWhitespace: dm.byteStringHexWhitespace,
+		ByteStringText:          dm.byteStringText,
+		ByteStringEmbeddedCBOR:  dm.byteStringEmbeddedCBOR,
+		CBORSequence:            dm.cborSequence,
+		FloatPrecisionIndicator: dm.floatPrecisionIndicator,
+		MaxNestedLevels:         dm.decMode.maxNestedLevels,
+		MaxArrayElements:        dm.decMode.maxArrayElements,
+		MaxMapPairs:             dm.decMode.maxMapPairs,
+	}
+}
+
+// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using the DiagMode.
+func (dm *diagMode) Diagnose(data []byte) (string, error) {
+	return newDiagnose(data, dm.decMode, dm).diag(dm.cborSequence)
+}
+
+// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
+func (dm *diagMode) DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) {
+	return newDiagnose(data, dm.decMode, dm).diagFirst()
+}
+
+var defaultDiagMode, _ = DiagOptions{}.diagMode()
+
+// Diagnose returns extended diagnostic notation (EDN) of CBOR data items
+// using the default diagnostic mode.
+//
+// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation.
+func Diagnose(data []byte) (string, error) {
+	return defaultDiagMode.Diagnose(data)
+}
+
+// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item
+// using the default diagnostic mode. Any remaining bytes are returned in rest.
+func DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) { + return defaultDiagMode.DiagnoseFirst(data) +} + +type diagnose struct { + dm *diagMode + d *decoder + w *bytes.Buffer +} + +func newDiagnose(data []byte, decm *decMode, diagm *diagMode) *diagnose { + return &diagnose{ + dm: diagm, + d: &decoder{data: data, dm: decm}, + w: &bytes.Buffer{}, + } +} + +func (di *diagnose) diag(cborSequence bool) (string, error) { + // CBOR Sequence + firstItem := true + for { + switch err := di.wellformed(cborSequence); err { + case nil: + if !firstItem { + di.w.WriteString(", ") + } + firstItem = false + if itemErr := di.item(); itemErr != nil { + return di.w.String(), itemErr + } + + case io.EOF: + if firstItem { + return di.w.String(), err + } + return di.w.String(), nil + + default: + return di.w.String(), err + } + } +} + +func (di *diagnose) diagFirst() (diagNotation string, rest []byte, err error) { + err = di.wellformed(true) + if err == nil { + err = di.item() + } + + if err == nil { + // Return EDN and the rest of the data slice (which might be len 0) + return di.w.String(), di.d.data[di.d.off:], nil + } + + return di.w.String(), nil, err +} + +func (di *diagnose) wellformed(allowExtraData bool) error { + off := di.d.off + err := di.d.wellformed(allowExtraData, false) + di.d.off = off + return err +} + +func (di *diagnose) item() error { //nolint:gocyclo + initialByte := di.d.data[di.d.off] + switch initialByte { + case cborByteStringWithIndefiniteLengthHead, + cborTextStringWithIndefiniteLengthHead: // indefinite-length byte/text string + di.d.off++ + if isBreakFlag(di.d.data[di.d.off]) { + di.d.off++ + switch initialByte { + case cborByteStringWithIndefiniteLengthHead: + // indefinite-length bytes with no chunks. + di.w.WriteString(`''_`) + return nil + case cborTextStringWithIndefiniteLengthHead: + // indefinite-length text with no chunks. + di.w.WriteString(`""_`) + return nil + } + } + + di.w.WriteString("(_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + // wellformedIndefiniteString() already checked that the next item is a byte/text string. + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte(')') + return nil + + case cborArrayWithIndefiniteLengthHead: // indefinite-length array + di.d.off++ + di.w.WriteString("[_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte(']') + return nil + + case cborMapWithIndefiniteLengthHead: // indefinite-length map + di.d.off++ + di.w.WriteString("{_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + // key + if err := di.item(); err != nil { + return err + } + + di.w.WriteString(": ") + + // value + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte('}') + return nil + } + + t := di.d.nextCBORType() + switch t { + case cborTypePositiveInt: + _, _, val := di.d.getHead() + di.w.WriteString(strconv.FormatUint(val, 10)) + return nil + + case cborTypeNegativeInt: + _, _, val := di.d.getHead() + if val > math.MaxInt64 { + // CBOR negative integer overflows int64, use big.Int to store value. 
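+			// For example (illustrative): 0x3b followed by eight 0xff bytes
+			// encodes -18446744073709551616 (-2^64), which has no int64
+			// representation and is therefore notated via big.Int below.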
+ bi := new(big.Int) + bi.SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + di.w.WriteString(bi.String()) + return nil + } + + nValue := int64(-1) ^ int64(val) + di.w.WriteString(strconv.FormatInt(nValue, 10)) + return nil + + case cborTypeByteString: + b, _ := di.d.parseByteString() + return di.encodeByteString(b) + + case cborTypeTextString: + b, err := di.d.parseTextString() + if err != nil { + return err + } + return di.encodeTextString(string(b), '"') + + case cborTypeArray: + _, _, val := di.d.getHead() + count := int(val) + di.w.WriteByte('[') + + for i := 0; i < count; i++ { + if i > 0 { + di.w.WriteString(", ") + } + if err := di.item(); err != nil { + return err + } + } + di.w.WriteByte(']') + return nil + + case cborTypeMap: + _, _, val := di.d.getHead() + count := int(val) + di.w.WriteByte('{') + + for i := 0; i < count; i++ { + if i > 0 { + di.w.WriteString(", ") + } + // key + if err := di.item(); err != nil { + return err + } + di.w.WriteString(": ") + // value + if err := di.item(); err != nil { + return err + } + } + di.w.WriteByte('}') + return nil + + case cborTypeTag: + _, _, tagNum := di.d.getHead() + switch tagNum { + case tagNumUnsignedBignum: + if nt := di.d.nextCBORType(); nt != cborTypeByteString { + return newInadmissibleTagContentTypeError( + tagNumUnsignedBignum, + "byte string", + nt.String()) + } + + b, _ := di.d.parseByteString() + bi := new(big.Int).SetBytes(b) + di.w.WriteString(bi.String()) + return nil + + case tagNumNegativeBignum: + if nt := di.d.nextCBORType(); nt != cborTypeByteString { + return newInadmissibleTagContentTypeError( + tagNumNegativeBignum, + "byte string", + nt.String(), + ) + } + + b, _ := di.d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + di.w.WriteString(bi.String()) + return nil + + default: + di.w.WriteString(strconv.FormatUint(tagNum, 10)) + di.w.WriteByte('(') + if err := di.item(); err != nil { + return err + } + di.w.WriteByte(')') + return nil + } + + case cborTypePrimitives: + _, ai, val := di.d.getHead() + switch ai { + case additionalInformationAsFalse: + di.w.WriteString("false") + return nil + + case additionalInformationAsTrue: + di.w.WriteString("true") + return nil + + case additionalInformationAsNull: + di.w.WriteString("null") + return nil + + case additionalInformationAsUndefined: + di.w.WriteString("undefined") + return nil + + case additionalInformationAsFloat16, + additionalInformationAsFloat32, + additionalInformationAsFloat64: + return di.encodeFloat(ai, val) + + default: + di.w.WriteString("simple(") + di.w.WriteString(strconv.FormatUint(val, 10)) + di.w.WriteByte(')') + return nil + } + } + + return nil +} + +// writeU16 format a rune as "\uxxxx" +func (di *diagnose) writeU16(val rune) { + di.w.WriteString("\\u") + var in [2]byte + in[0] = byte(val >> 8) + in[1] = byte(val) + sz := hex.EncodedLen(len(in)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + hex.Encode(dst, in[:]) + di.w.Write(dst) +} + +var rawBase32Encoding = base32.StdEncoding.WithPadding(base32.NoPadding) +var rawBase32HexEncoding = base32.HexEncoding.WithPadding(base32.NoPadding) + +func (di *diagnose) encodeByteString(val []byte) error { + if len(val) > 0 { + if di.dm.byteStringText && utf8.Valid(val) { + return di.encodeTextString(string(val), '\'') + } + + if di.dm.byteStringEmbeddedCBOR { + di2 := newDiagnose(val, di.dm.decMode, di.dm) + // should always notating embedded CBOR sequence. 
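+			// For example (illustrative): with ByteStringEmbeddedCBOR enabled,
+			// the byte string h'0102' is notated as "<<1, 2>>", because its
+			// bytes parse as the CBOR sequence of data items 1 and 2.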
+ if str, err := di2.diag(true); err == nil { + di.w.WriteString("<<") + di.w.WriteString(str) + di.w.WriteString(">>") + return nil + } + } + } + + switch di.dm.byteStringEncoding { + case ByteStringBase16Encoding: + di.w.WriteString("h'") + if di.dm.byteStringHexWhitespace { + sz := hex.EncodedLen(len(val)) + if len(val) > 0 { + sz += len(val) - 1 + } + di.w.Grow(sz) + + dst := di.w.Bytes()[di.w.Len():] + for i := range val { + if i > 0 { + dst = append(dst, ' ') + } + hex.Encode(dst[len(dst):len(dst)+2], val[i:i+1]) + dst = dst[:len(dst)+2] + } + di.w.Write(dst) + } else { + sz := hex.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + hex.Encode(dst, val) + di.w.Write(dst) + } + di.w.WriteByte('\'') + return nil + + case ByteStringBase32Encoding: + di.w.WriteString("b32'") + sz := rawBase32Encoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + rawBase32Encoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + case ByteStringBase32HexEncoding: + di.w.WriteString("h32'") + sz := rawBase32HexEncoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + rawBase32HexEncoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + case ByteStringBase64Encoding: + di.w.WriteString("b64'") + sz := base64.RawURLEncoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + base64.RawURLEncoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + default: + // It should not be possible for users to construct a *diagMode with an invalid byte + // string encoding. + panic(fmt.Sprintf("diagmode has invalid ByteStringEncoding %v", di.dm.byteStringEncoding)) + } +} + +const utf16SurrSelf = rune(0x10000) + +// quote should be either `'` or `"` +func (di *diagnose) encodeTextString(val string, quote byte) error { + di.w.WriteByte(quote) + + for i := 0; i < len(val); { + if b := val[i]; b < utf8.RuneSelf { + switch { + case b == '\t', b == '\n', b == '\r', b == '\\', b == quote: + di.w.WriteByte('\\') + + switch b { + case '\t': + b = 't' + case '\n': + b = 'n' + case '\r': + b = 'r' + } + di.w.WriteByte(b) + + case b >= ' ' && b <= '~': + di.w.WriteByte(b) + + default: + di.writeU16(rune(b)) + } + + i++ + continue + } + + c, size := utf8.DecodeRuneInString(val[i:]) + switch { + case c == utf8.RuneError: + return &SemanticError{"cbor: invalid UTF-8 string"} + + case c < utf16SurrSelf: + di.writeU16(c) + + default: + c1, c2 := utf16.EncodeRune(c) + di.writeU16(c1) + di.writeU16(c2) + } + + i += size + } + + di.w.WriteByte(quote) + return nil +} + +func (di *diagnose) encodeFloat(ai byte, val uint64) error { + f64 := float64(0) + switch ai { + case additionalInformationAsFloat16: + f16 := float16.Frombits(uint16(val)) + switch { + case f16.IsNaN(): + di.w.WriteString("NaN") + return nil + case f16.IsInf(1): + di.w.WriteString("Infinity") + return nil + case f16.IsInf(-1): + di.w.WriteString("-Infinity") + return nil + default: + f64 = float64(f16.Float32()) + } + + case additionalInformationAsFloat32: + f32 := math.Float32frombits(uint32(val)) + switch { + case f32 != f32: + di.w.WriteString("NaN") + return nil + case f32 > math.MaxFloat32: + di.w.WriteString("Infinity") + return nil + case f32 < -math.MaxFloat32: + di.w.WriteString("-Infinity") + return nil + default: + f64 = float64(f32) + } + + case additionalInformationAsFloat64: + f64 = math.Float64frombits(val) + switch { + case f64 
!= f64:
+			di.w.WriteString("NaN")
+			return nil
+		case f64 > math.MaxFloat64:
+			di.w.WriteString("Infinity")
+			return nil
+		case f64 < -math.MaxFloat64:
+			di.w.WriteString("-Infinity")
+			return nil
+		}
+	}
+	// Use ES6 number to string conversion which should match most JSON generators.
+	// Inspired by https://github.com/golang/go/blob/4df10fba1687a6d4f51d7238a403f8f2298f6a16/src/encoding/json/encode.go#L585
+	const bitSize = 64
+	b := make([]byte, 0, 32)
+	if abs := math.Abs(f64); abs != 0 && (abs < 1e-6 || abs >= 1e21) {
+		b = strconv.AppendFloat(b, f64, 'e', -1, bitSize)
+		// clean up e-09 to e-9
+		n := len(b)
+		if n >= 4 && string(b[n-4:n-1]) == "e-0" {
+			b = append(b[:n-2], b[n-1])
+		}
+	} else {
+		b = strconv.AppendFloat(b, f64, 'f', -1, bitSize)
+	}
+
+	// add decimal point and trailing zero if needed
+	if bytes.IndexByte(b, '.') < 0 {
+		if i := bytes.IndexByte(b, 'e'); i < 0 {
+			b = append(b, '.', '0')
+		} else {
+			b = append(b[:i+2], b[i:]...)
+			b[i] = '.'
+			b[i+1] = '0'
+		}
+	}
+
+	di.w.WriteString(string(b))
+
+	if di.dm.floatPrecisionIndicator {
+		switch ai {
+		case additionalInformationAsFloat16:
+			di.w.WriteString("_1")
+			return nil
+
+		case additionalInformationAsFloat32:
+			di.w.WriteString("_2")
+			return nil
+
+		case additionalInformationAsFloat64:
+			di.w.WriteString("_3")
+			return nil
+		}
+	}
+
+	return nil
+}
diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/doc.go b/go-controller/vendor/github.com/fxamacker/cbor/v2/doc.go
new file mode 100644
index 0000000000..23f68b984c
--- /dev/null
+++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/doc.go
@@ -0,0 +1,129 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+/*
+Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags,
+Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding,
+CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection.
+
+Encoding options allow "preferred serialization" by encoding integers and floats
+to their smallest forms (e.g. float16) when values fit.
+
+Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller
+and easier to use with structs.
+
+For example, the "toarray" tag makes struct fields encode to CBOR array elements. And
+"keyasint" makes a field encode to an element of a CBOR map with the specified int key.
+
+Latest docs can be viewed at https://github.com/fxamacker/cbor#cbor-library-in-go
+
+# Basics
+
+The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start
+
+Function signatures identical to encoding/json include:
+
+	Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode.
+
+Standard interfaces include:
+
+	BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler.
+
+Custom encoding and decoding is possible by implementing standard interfaces for
+user-defined Go types.
+
+Codec functions are available at package-level (using default options) or by
+creating modes from options at runtime.
+
+"Mode" in this API means a definite way of encoding (EncMode) or decoding (DecMode).
+
+EncMode and DecMode interfaces are created from EncOptions or DecOptions structs.
+
+	em, err := cbor.EncOptions{...}.EncMode()
+	em, err := cbor.CanonicalEncOptions().EncMode()
+	em, err := cbor.CTAP2EncOptions().EncMode()
+
+Modes use immutable options to avoid side-effects and simplify concurrency. Behavior of
+modes won't accidentally change at runtime after they're created.
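+
+For example (an illustrative sketch), a strict decoding mode can be created once
+and shared:
+
+	dm, err := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()
+	if err != nil {
+		panic(err)
+	}
+	data := []byte{0xa1, 0x61, 0x61, 0x01} // {"a": 1}
+	var v map[string]interface{}
+	err = dm.Unmarshal(data, &v)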
+ +Modes are intended to be reused and are safe for concurrent use. + +EncMode and DecMode Interfaces + + // EncMode interface uses immutable options and is safe for concurrent use. + type EncMode interface { + Marshal(v interface{}) ([]byte, error) + NewEncoder(w io.Writer) *Encoder + EncOptions() EncOptions // returns copy of options + } + + // DecMode interface uses immutable options and is safe for concurrent use. + type DecMode interface { + Unmarshal(data []byte, v interface{}) error + NewDecoder(r io.Reader) *Decoder + DecOptions() DecOptions // returns copy of options + } + +Using Default Encoding Mode + + b, err := cbor.Marshal(v) + + encoder := cbor.NewEncoder(w) + err = encoder.Encode(v) + +Using Default Decoding Mode + + err := cbor.Unmarshal(b, &v) + + decoder := cbor.NewDecoder(r) + err = decoder.Decode(&v) + +Creating and Using Encoding Modes + + // Create EncOptions using either struct literal or a function. + opts := cbor.CanonicalEncOptions() + + // If needed, modify encoding options + opts.Time = cbor.TimeUnix + + // Create reusable EncMode interface with immutable options, safe for concurrent use. + em, err := opts.EncMode() + + // Use EncMode like encoding/json, with same function signatures. + b, err := em.Marshal(v) + // or + encoder := em.NewEncoder(w) + err := encoder.Encode(v) + + // NOTE: Both em.Marshal(v) and encoder.Encode(v) use encoding options + // specified during creation of em (encoding mode). + +# CBOR Options + +Predefined Encoding Options: https://github.com/fxamacker/cbor#predefined-encoding-options + +Encoding Options: https://github.com/fxamacker/cbor#encoding-options + +Decoding Options: https://github.com/fxamacker/cbor#decoding-options + +# Struct Tags + +Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected. +If both struct tags are specified then `cbor` is used. + +Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use +very compact formats like COSE and CWT (CBOR Web Tokens) with structs. + +For example, "toarray" makes struct fields encode to array elements. And "keyasint" +makes struct fields encode to elements of CBOR map with int keys. + +https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png + +Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1 + +# Tests and Fuzzing + +Over 375 tests are included in this package. Cover-guided fuzzing is handled by +a private fuzzer that replaced fxamacker/cbor-fuzz years ago. +*/ +package cbor diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/encode.go b/go-controller/vendor/github.com/fxamacker/cbor/v2/encode.go new file mode 100644 index 0000000000..6508e291d6 --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/encode.go @@ -0,0 +1,1989 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "bytes" + "encoding" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "math/big" + "math/rand" + "reflect" + "sort" + "strconv" + "sync" + "time" + + "github.com/x448/float16" +) + +// Marshal returns the CBOR encoding of v using default encoding options. +// See EncOptions for encoding options. +// +// Marshal uses the following encoding rules: +// +// If value implements the Marshaler interface, Marshal calls its +// MarshalCBOR method. 
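+//
+// For example (illustrative; the type is hypothetical):
+//
+//	type unixTime struct{ sec int64 }
+//
+//	func (t unixTime) MarshalCBOR() ([]byte, error) {
+//		return cbor.Marshal(t.sec) // delegate encoding of the payload
+//	}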
+//
+// If value implements encoding.BinaryMarshaler, Marshal calls its
+// MarshalBinary method and encodes it as a CBOR byte string.
+//
+// Boolean values encode as CBOR booleans (type 7).
+//
+// Positive integer values encode as CBOR positive integers (type 0).
+//
+// Negative integer values encode as CBOR negative integers (type 1).
+//
+// Floating point values encode as CBOR floating points (type 7).
+//
+// String values encode as CBOR text strings (type 3).
+//
+// []byte values encode as CBOR byte strings (type 2).
+//
+// Array and slice values encode as CBOR arrays (type 4).
+//
+// Map values encode as CBOR maps (type 5).
+//
+// Struct values encode as CBOR maps (type 5). Each exported struct field
+// becomes a pair with field name encoded as CBOR text string (type 3) and
+// field value encoded based on its type. See struct tag option "keyasint"
+// to encode field name as CBOR integer (type 0 and 1). Also see struct
+// tag option "toarray" for special field "_" to encode struct values as
+// CBOR array (type 4).
+//
+// Marshal supports format strings stored under the "cbor" key in the struct
+// field's tag. The CBOR format string can specify the name of the field,
+// "omitempty" and "keyasint" options, and special case "-" for field omission.
+// If the "cbor" key is absent, Marshal uses the "json" key.
+//
+// A struct field name is treated as an integer if it has the "keyasint" option
+// in its format string. The format string must specify an integer as its
+// field name.
+//
+// Special struct field "_" is used to specify struct-level options, such as
+// "toarray". The "toarray" option enables a Go struct to be encoded as a CBOR
+// array. "omitempty" is disabled by "toarray" to ensure that the same number
+// of elements are encoded every time.
+//
+// Anonymous struct fields are marshaled as if their exported fields
+// were fields in the outer struct. Marshal follows the same struct field
+// visibility rules used by the JSON encoding package.
+//
+// time.Time values encode as text strings specified in RFC3339 or as a numerical
+// representation of seconds since January 1, 1970 UTC depending on the
+// EncOptions.Time setting. Also see EncOptions.TimeTag to encode
+// time.Time as a CBOR tag with tag number 0 or 1.
+//
+// big.Int values encode as CBOR integers (type 0 and 1) if the values fit.
+// Otherwise, big.Int values encode as CBOR bignums (tag 2 and 3). See
+// EncOptions.BigIntConvert to always encode big.Int values as CBOR
+// bignums.
+//
+// Pointer values encode as the value pointed to.
+//
+// Interface values encode as the value stored in the interface.
+//
+// Nil slice/map/pointer/interface values encode as CBOR nulls (type 7).
+//
+// Values of other types cannot be encoded in CBOR. Attempting
+// to encode such a value causes Marshal to return an UnsupportedTypeError.
+func Marshal(v interface{}) ([]byte, error) {
+	return defaultEncMode.Marshal(v)
+}
+
+// MarshalToBuffer encodes v into the provided buffer (instead of using the
+// built-in buffer pool) and uses default encoding options.
+//
+// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain
+// partially encoded data if an error is returned.
+//
+// See Marshal for more details.
+func MarshalToBuffer(v interface{}, buf *bytes.Buffer) error {
+	return defaultEncMode.MarshalToBuffer(v, buf)
+}
+
+// Marshaler is the interface implemented by types that can marshal themselves
+// into valid CBOR.
+type Marshaler interface {
+	MarshalCBOR() ([]byte, error)
+}
+
+// MarshalerError represents error from checking encoded CBOR data item
+// returned from MarshalCBOR for well-formedness and some very limited tag validation.
+type MarshalerError struct {
+	typ reflect.Type
+	err error
+}
+
+func (e *MarshalerError) Error() string {
+	return "cbor: error calling MarshalCBOR for type " +
+		e.typ.String() +
+		": " + e.err.Error()
+}
+
+func (e *MarshalerError) Unwrap() error {
+	return e.err
+}
+
+// UnsupportedTypeError is returned by Marshal when attempting to encode value
+// of an unsupported type.
+type UnsupportedTypeError struct {
+	Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+	return "cbor: unsupported type: " + e.Type.String()
+}
+
+// UnsupportedValueError is returned by Marshal when attempting to encode an
+// unsupported value.
+type UnsupportedValueError struct {
+	msg string
+}
+
+func (e *UnsupportedValueError) Error() string {
+	return "cbor: unsupported value: " + e.msg
+}
+
+// SortMode identifies supported sorting order.
+type SortMode int
+
+const (
+	// SortNone encodes map pairs and struct fields in an arbitrary order.
+	SortNone SortMode = 0
+
+	// SortLengthFirst causes map keys or struct fields to be sorted such that:
+	//   - If two keys have different lengths, the shorter one sorts earlier;
+	//   - If two keys have the same length, the one with the lower value in
+	//     (byte-wise) lexical order sorts earlier.
+	// It is used in "Canonical CBOR" encoding in RFC 7049 3.9.
+	SortLengthFirst SortMode = 1
+
+	// SortBytewiseLexical causes map keys or struct fields to be sorted in the
+	// bytewise lexicographic order of their deterministic CBOR encodings.
+	// It is used in "CTAP2 Canonical CBOR" and "Core Deterministic Encoding"
+	// in RFC 7049bis.
+	SortBytewiseLexical SortMode = 2
+
+	// SortFastShuffle encodes map pairs and struct fields in a shuffled
+	// order. This mode does not guarantee an unbiased permutation, but it
+	// does guarantee that the runtime of the shuffle algorithm used will be
+	// constant.
+	SortFastShuffle SortMode = 3
+
+	// SortCanonical is used in "Canonical CBOR" encoding in RFC 7049 3.9.
+	SortCanonical SortMode = SortLengthFirst
+
+	// SortCTAP2 is used in "CTAP2 Canonical CBOR".
+	SortCTAP2 SortMode = SortBytewiseLexical
+
+	// SortCoreDeterministic is used in "Core Deterministic Encoding" in RFC 7049bis.
+	SortCoreDeterministic SortMode = SortBytewiseLexical
+
+	maxSortMode SortMode = 4
+)
+
+func (sm SortMode) valid() bool {
+	return sm >= 0 && sm < maxSortMode
+}
+
+// StringMode specifies how to encode Go string values.
+type StringMode int
+
+const (
+	// StringToTextString encodes Go string to CBOR text string (major type 3).
+	StringToTextString StringMode = iota
+
+	// StringToByteString encodes Go string to CBOR byte string (major type 2).
+	StringToByteString
+)
+
+func (st StringMode) cborType() (cborType, error) {
+	switch st {
+	case StringToTextString:
+		return cborTypeTextString, nil
+
+	case StringToByteString:
+		return cborTypeByteString, nil
+	}
+	return 0, errors.New("cbor: invalid StringType " + strconv.Itoa(int(st)))
+}
+
+// ShortestFloatMode specifies which floating-point format should
+// be used as the shortest possible format for CBOR encoding.
+// It is not used for encoding Infinity and NaN values.
+type ShortestFloatMode int
+
+const (
+	// ShortestFloatNone makes float values encode without any conversion.
+	// This is the default for ShortestFloatMode in v1.
+	// E.g.
a float32 in Go will encode to CBOR float32. And
+	// a float64 in Go will encode to CBOR float64.
+	ShortestFloatNone ShortestFloatMode = iota
+
+	// ShortestFloat16 specifies float16 as the shortest form that preserves value.
+	// E.g. if float64 can convert to float32 while preserving value, then
+	// encoding will also try to convert float32 to float16. So a float64 might
+	// encode as CBOR float64, float32 or float16 depending on the value.
+	ShortestFloat16
+
+	maxShortestFloat
+)
+
+func (sfm ShortestFloatMode) valid() bool {
+	return sfm >= 0 && sfm < maxShortestFloat
+}
+
+// NaNConvertMode specifies how to encode NaN and overrides ShortestFloatMode.
+// ShortestFloatMode is not used for encoding Infinity and NaN values.
+type NaNConvertMode int
+
+const (
+	// NaNConvert7e00 always encodes NaN to 0xf97e00 (CBOR float16 = 0x7e00).
+	NaNConvert7e00 NaNConvertMode = iota
+
+	// NaNConvertNone never modifies or converts NaN to other representations
+	// (float64 NaN stays float64, etc. even if it can use float16 without losing
+	// any bits).
+	NaNConvertNone
+
+	// NaNConvertPreserveSignal converts NaN to the smallest form that preserves
+	// value (quiet bit + payload) as described in RFC 7049bis Draft 12.
+	NaNConvertPreserveSignal
+
+	// NaNConvertQuiet always forces quiet bit = 1 and shortest form that preserves
+	// NaN payload.
+	NaNConvertQuiet
+
+	// NaNConvertReject returns UnsupportedValueError on attempts to encode a NaN value.
+	NaNConvertReject
+
+	maxNaNConvert
+)
+
+func (ncm NaNConvertMode) valid() bool {
+	return ncm >= 0 && ncm < maxNaNConvert
+}
+
+// InfConvertMode specifies how to encode Infinity and overrides ShortestFloatMode.
+// ShortestFloatMode is not used for encoding Infinity and NaN values.
+type InfConvertMode int
+
+const (
+	// InfConvertFloat16 always converts Inf to lossless IEEE binary16 (float16).
+	InfConvertFloat16 InfConvertMode = iota
+
+	// InfConvertNone never converts (used by CTAP2 Canonical CBOR).
+	InfConvertNone
+
+	// InfConvertReject returns UnsupportedValueError on attempts to encode an infinite value.
+	InfConvertReject
+
+	maxInfConvert
+)
+
+func (icm InfConvertMode) valid() bool {
+	return icm >= 0 && icm < maxInfConvert
+}
+
+// TimeMode specifies how to encode time.Time values.
+type TimeMode int
+
+const (
+	// TimeUnix causes time.Time to be encoded as epoch time in integer with second precision.
+	TimeUnix TimeMode = iota
+
+	// TimeUnixMicro causes time.Time to be encoded as epoch time in floating-point rounded to microsecond precision.
+	TimeUnixMicro
+
+	// TimeUnixDynamic causes time.Time to be encoded as integer if time.Time doesn't have fractional seconds,
+	// otherwise floating-point rounded to microsecond precision.
+	TimeUnixDynamic
+
+	// TimeRFC3339 causes time.Time to be encoded as RFC3339 formatted string with second precision.
+	TimeRFC3339
+
+	// TimeRFC3339Nano causes time.Time to be encoded as RFC3339 formatted string with nanosecond precision.
+	TimeRFC3339Nano
+
+	maxTimeMode
+)
+
+func (tm TimeMode) valid() bool {
+	return tm >= 0 && tm < maxTimeMode
+}
+
+// BigIntConvertMode specifies how to encode big.Int values.
+type BigIntConvertMode int
+
+const (
+	// BigIntConvertShortest makes big.Int encode to CBOR integer if value fits.
+	// E.g. if big.Int value can be converted to CBOR integer while preserving
+	// value, encoder will encode it to CBOR integer (major type 0 or 1).
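+	// For example, big.NewInt(-1) encodes as the 1-byte CBOR integer 0x20 in
+	// this mode, while a magnitude too large for 64 bits still encodes as a
+	// tagged bignum.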
+	BigIntConvertShortest BigIntConvertMode = iota
+
+	// BigIntConvertNone makes big.Int encode to CBOR bignum (tag 2 or 3) without
+	// converting it to another CBOR type.
+	BigIntConvertNone
+
+	// BigIntConvertReject returns an UnsupportedTypeError instead of marshaling a big.Int.
+	BigIntConvertReject
+
+	maxBigIntConvert
+)
+
+func (bim BigIntConvertMode) valid() bool {
+	return bim >= 0 && bim < maxBigIntConvert
+}
+
+// NilContainersMode specifies how to encode nil slices and maps.
+type NilContainersMode int
+
+const (
+	// NilContainerAsNull encodes nil slices and maps as CBOR null.
+	// This is the default.
+	NilContainerAsNull NilContainersMode = iota
+
+	// NilContainerAsEmpty encodes nil slices and maps as
+	// empty container (CBOR bytestring, array, or map).
+	NilContainerAsEmpty
+
+	maxNilContainersMode
+)
+
+func (m NilContainersMode) valid() bool {
+	return m >= 0 && m < maxNilContainersMode
+}
+
+// OmitEmptyMode specifies how to encode struct fields with omitempty tag.
+// The default behavior omits if field value would encode as empty CBOR value.
+type OmitEmptyMode int
+
+const (
+	// OmitEmptyCBORValue specifies that struct fields tagged with "omitempty"
+	// should be omitted from encoding if the field would be encoded as an empty
+	// CBOR value, such as CBOR false, 0, 0.0, nil, empty byte, empty string,
+	// empty array, or empty map.
+	OmitEmptyCBORValue OmitEmptyMode = iota
+
+	// OmitEmptyGoValue specifies that struct fields tagged with "omitempty"
+	// should be omitted from encoding if the field has an empty Go value,
+	// defined as false, 0, 0.0, a nil pointer, a nil interface value, and
+	// any empty array, slice, map, or string.
+	// This behavior is the same as the current (aka v1) encoding/json package
+	// included in Go.
+	OmitEmptyGoValue
+
+	maxOmitEmptyMode
+)
+
+func (om OmitEmptyMode) valid() bool {
+	return om >= 0 && om < maxOmitEmptyMode
+}
+
+// FieldNameMode specifies the CBOR type to use when encoding struct field names.
+type FieldNameMode int
+
+const (
+	// FieldNameToTextString encodes struct field names to CBOR text string (major type 3).
+	FieldNameToTextString FieldNameMode = iota
+
+	// FieldNameToByteString encodes struct field names to CBOR byte string (major type 2).
+	FieldNameToByteString
+
+	maxFieldNameMode
+)
+
+func (fnm FieldNameMode) valid() bool {
+	return fnm >= 0 && fnm < maxFieldNameMode
+}
+
+// ByteSliceLaterFormatMode specifies which later format conversion hint (CBOR tag 21-23)
+// to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will
+// always encode unmodified bytes from the byte slice and just wrap it within
+// CBOR tag 21, 22, or 23 if specified.
+// See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2.
+type ByteSliceLaterFormatMode int
+
+const (
+	// ByteSliceLaterFormatNone encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+	// without adding CBOR tag 21, 22, or 23.
+	ByteSliceLaterFormatNone ByteSliceLaterFormatMode = iota
+
+	// ByteSliceLaterFormatBase64URL encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+	// inside CBOR tag 21 (expected later conversion to base64url encoding, see RFC 8949 Section 3.4.5.2).
+	ByteSliceLaterFormatBase64URL
+
+	// ByteSliceLaterFormatBase64 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+	// inside CBOR tag 22 (expected later conversion to base64 encoding, see RFC 8949 Section 3.4.5.2).
+ ByteSliceLaterFormatBase64 + + // ByteSliceLaterFormatBase16 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // inside CBOR tag 23 (expected later conversion to base16 encoding, see RFC 8949 Section 3.4.5.2). + ByteSliceLaterFormatBase16 +) + +func (bsefm ByteSliceLaterFormatMode) encodingTag() (uint64, error) { + switch bsefm { + case ByteSliceLaterFormatNone: + return 0, nil + + case ByteSliceLaterFormatBase64URL: + return tagNumExpectedLaterEncodingBase64URL, nil + + case ByteSliceLaterFormatBase64: + return tagNumExpectedLaterEncodingBase64, nil + + case ByteSliceLaterFormatBase16: + return tagNumExpectedLaterEncodingBase16, nil + } + return 0, errors.New("cbor: invalid ByteSliceLaterFormat " + strconv.Itoa(int(bsefm))) +} + +// ByteArrayMode specifies how to encode byte arrays. +type ByteArrayMode int + +const ( + // ByteArrayToByteSlice encodes byte arrays the same way that a byte slice with identical + // length and contents is encoded. + ByteArrayToByteSlice ByteArrayMode = iota + + // ByteArrayToArray encodes byte arrays to the CBOR array type with one unsigned integer + // item for each byte in the array. + ByteArrayToArray + + maxByteArrayMode +) + +func (bam ByteArrayMode) valid() bool { + return bam >= 0 && bam < maxByteArrayMode +} + +// BinaryMarshalerMode specifies how to encode types that implement encoding.BinaryMarshaler. +type BinaryMarshalerMode int + +const ( + // BinaryMarshalerByteString encodes the output of MarshalBinary to a CBOR byte string. + BinaryMarshalerByteString BinaryMarshalerMode = iota + + // BinaryMarshalerNone does not recognize BinaryMarshaler implementations during encode. + BinaryMarshalerNone + + maxBinaryMarshalerMode +) + +func (bmm BinaryMarshalerMode) valid() bool { + return bmm >= 0 && bmm < maxBinaryMarshalerMode +} + +// EncOptions specifies encoding options. +type EncOptions struct { + // Sort specifies sorting order. + Sort SortMode + + // ShortestFloat specifies the shortest floating-point encoding that preserves + // the value being encoded. + ShortestFloat ShortestFloatMode + + // NaNConvert specifies how to encode NaN and it overrides ShortestFloatMode. + NaNConvert NaNConvertMode + + // InfConvert specifies how to encode Inf and it overrides ShortestFloatMode. + InfConvert InfConvertMode + + // BigIntConvert specifies how to encode big.Int values. + BigIntConvert BigIntConvertMode + + // Time specifies how to encode time.Time. + Time TimeMode + + // TimeTag allows time.Time to be encoded with a tag number. + // RFC3339 format gets tag number 0, and numeric epoch time tag number 1. + TimeTag EncTagMode + + // IndefLength specifies whether to allow indefinite length CBOR items. + IndefLength IndefLengthMode + + // NilContainers specifies how to encode nil slices and maps. + NilContainers NilContainersMode + + // TagsMd specifies whether to allow CBOR tags (major type 6). + TagsMd TagsMode + + // OmitEmptyMode specifies how to encode struct fields with omitempty tag. + OmitEmpty OmitEmptyMode + + // String specifies which CBOR type to use when encoding Go strings. + // - CBOR text string (major type 3) is default + // - CBOR byte string (major type 2) + String StringMode + + // FieldName specifies the CBOR type to use when encoding struct field names. + FieldName FieldNameMode + + // ByteSliceLaterFormat specifies which later format conversion hint (CBOR tag 21-23) + // to include (if any) when encoding Go byte slice to CBOR byte string. 
The encoder will
+	// always encode unmodified bytes from the byte slice and just wrap it within
+	// CBOR tag 21, 22, or 23 if specified.
+	// See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2.
+	ByteSliceLaterFormat ByteSliceLaterFormatMode
+
+	// ByteArray specifies how to encode byte arrays.
+	ByteArray ByteArrayMode
+
+	// BinaryMarshaler specifies how to encode types that implement encoding.BinaryMarshaler.
+	BinaryMarshaler BinaryMarshalerMode
+}
+
+// CanonicalEncOptions returns EncOptions for "Canonical CBOR" encoding,
+// defined in RFC 7049 Section 3.9 with the following rules:
+//
+// 1. "Integers must be as small as possible."
+// 2. "The expression of lengths in major types 2 through 5 must be as short as possible."
+// 3. The keys in every map must be sorted in length-first sorting order.
+//    See SortLengthFirst for details.
+// 4. "Indefinite-length items must be made into definite-length items."
+// 5. "If a protocol allows for IEEE floats, then additional canonicalization rules might
+//    need to be added. One example rule might be to have all floats start as a 64-bit
+//    float, then do a test conversion to a 32-bit float; if the result is the same numeric
+//    value, use the shorter value and repeat the process with a test conversion to a
+//    16-bit float. (This rule selects 16-bit float for positive and negative Infinity
+//    as well.) Also, there are many representations for NaN. If NaN is an allowed value,
+//    it must always be represented as 0xf97e00."
+func CanonicalEncOptions() EncOptions {
+	return EncOptions{
+		Sort:          SortCanonical,
+		ShortestFloat: ShortestFloat16,
+		NaNConvert:    NaNConvert7e00,
+		InfConvert:    InfConvertFloat16,
+		IndefLength:   IndefLengthForbidden,
+	}
+}
+
+// CTAP2EncOptions returns EncOptions for "CTAP2 Canonical CBOR" encoding,
+// defined in CTAP specification, with the following rules:
+//
+// 1. "Integers must be encoded as small as possible."
+// 2. "The representations of any floating-point values are not changed."
+// 3. "The expression of lengths in major types 2 through 5 must be as short as possible."
+// 4. "Indefinite-length items must be made into definite-length items."
+// 5. The keys in every map must be sorted in bytewise lexicographic order.
+//    See SortBytewiseLexical for details.
+// 6. "Tags as defined in Section 2.4 in [RFC7049] MUST NOT be present."
+func CTAP2EncOptions() EncOptions {
+	return EncOptions{
+		Sort:          SortCTAP2,
+		ShortestFloat: ShortestFloatNone,
+		NaNConvert:    NaNConvertNone,
+		InfConvert:    InfConvertNone,
+		IndefLength:   IndefLengthForbidden,
+		TagsMd:        TagsForbidden,
+	}
+}
+
+// CoreDetEncOptions returns EncOptions for "Core Deterministic" encoding,
+// defined in RFC 7049bis with the following rules:
+//
+// 1. "Preferred serialization MUST be used. In particular, this means that arguments
+//    (see Section 3) for integers, lengths in major types 2 through 5, and tags MUST
+//    be as short as possible"
+//    "Floating point values also MUST use the shortest form that preserves the value"
+// 2. "Indefinite-length items MUST NOT appear."
+// 3. "The keys in every map MUST be sorted in the bytewise lexicographic order of
+//    their deterministic encodings."
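+//
+// A typical use, sketched here with error handling elided:
+//
+//	em, _ := CoreDetEncOptions().EncMode() // reusable; safe for concurrent use
+//	b, _ := em.Marshal(v)                  // v encodes deterministically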
+func CoreDetEncOptions() EncOptions { + return EncOptions{ + Sort: SortCoreDeterministic, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + IndefLength: IndefLengthForbidden, + } +} + +// PreferredUnsortedEncOptions returns EncOptions for "Preferred Serialization" encoding, +// defined in RFC 7049bis with the following rules: +// +// 1. "The preferred serialization always uses the shortest form of representing the argument +// (Section 3);" +// 2. "it also uses the shortest floating-point encoding that preserves the value being +// encoded (see Section 5.5)." +// "The preferred encoding for a floating-point value is the shortest floating-point encoding +// that preserves its value, e.g., 0xf94580 for the number 5.5, and 0xfa45ad9c00 for the +// number 5555.5, unless the CBOR-based protocol specifically excludes the use of the shorter +// floating-point encodings. For NaN values, a shorter encoding is preferred if zero-padding +// the shorter significand towards the right reconstitutes the original NaN value (for many +// applications, the single NaN encoding 0xf97e00 will suffice)." +// 3. "Definite length encoding is preferred whenever the length is known at the time the +// serialization of the item starts." +func PreferredUnsortedEncOptions() EncOptions { + return EncOptions{ + Sort: SortNone, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + } +} + +// EncMode returns EncMode with immutable options and no tags (safe for concurrency). +func (opts EncOptions) EncMode() (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.encMode() +} + +// UserBufferEncMode returns UserBufferEncMode with immutable options and no tags (safe for concurrency). +func (opts EncOptions) UserBufferEncMode() (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.encMode() +} + +// EncModeWithTags returns EncMode with options and tags that are both immutable (safe for concurrency). +func (opts EncOptions) EncModeWithTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.UserBufferEncModeWithTags(tags) +} + +// UserBufferEncModeWithTags returns UserBufferEncMode with options and tags that are both immutable (safe for concurrency). +func (opts EncOptions) UserBufferEncModeWithTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet") + } + em, err := opts.encMode() + if err != nil { + return nil, err + } + // Copy tags + ts := tagSet(make(map[reflect.Type]*tagItem)) + syncTags := tags.(*syncTagSet) + syncTags.RLock() + for contentType, tag := range syncTags.t { + if tag.opts.EncTag != EncTagNone { + ts[contentType] = tag + } + } + syncTags.RUnlock() + if len(ts) > 0 { + em.tags = ts + } + return em, nil +} + +// EncModeWithSharedTags returns EncMode with immutable options and mutable shared tags (safe for concurrency). +func (opts EncOptions) EncModeWithSharedTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.UserBufferEncModeWithSharedTags(tags) +} + +// UserBufferEncModeWithSharedTags returns UserBufferEncMode with immutable options and mutable shared tags (safe for concurrency). 
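+// Because the returned mode shares the TagSet rather than copying it, tags
+// registered on the TagSet after this call are also visible to the mode. A
+// sketch, assuming ts was built with NewTagSet and populated via TagSet.Add:
+//
+//	em, err := opts.UserBufferEncModeWithSharedTags(ts)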
+func (opts EncOptions) UserBufferEncModeWithSharedTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet") + } + em, err := opts.encMode() + if err != nil { + return nil, err + } + em.tags = tags + return em, nil +} + +func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore hugeParam + if !opts.Sort.valid() { + return nil, errors.New("cbor: invalid SortMode " + strconv.Itoa(int(opts.Sort))) + } + if !opts.ShortestFloat.valid() { + return nil, errors.New("cbor: invalid ShortestFloatMode " + strconv.Itoa(int(opts.ShortestFloat))) + } + if !opts.NaNConvert.valid() { + return nil, errors.New("cbor: invalid NaNConvertMode " + strconv.Itoa(int(opts.NaNConvert))) + } + if !opts.InfConvert.valid() { + return nil, errors.New("cbor: invalid InfConvertMode " + strconv.Itoa(int(opts.InfConvert))) + } + if !opts.BigIntConvert.valid() { + return nil, errors.New("cbor: invalid BigIntConvertMode " + strconv.Itoa(int(opts.BigIntConvert))) + } + if !opts.Time.valid() { + return nil, errors.New("cbor: invalid TimeMode " + strconv.Itoa(int(opts.Time))) + } + if !opts.TimeTag.valid() { + return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag))) + } + if !opts.IndefLength.valid() { + return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength))) + } + if !opts.NilContainers.valid() { + return nil, errors.New("cbor: invalid NilContainers " + strconv.Itoa(int(opts.NilContainers))) + } + if !opts.TagsMd.valid() { + return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd))) + } + if opts.TagsMd == TagsForbidden && opts.TimeTag == EncTagRequired { + return nil, errors.New("cbor: cannot set TagsMd to TagsForbidden when TimeTag is EncTagRequired") + } + if !opts.OmitEmpty.valid() { + return nil, errors.New("cbor: invalid OmitEmpty " + strconv.Itoa(int(opts.OmitEmpty))) + } + stringMajorType, err := opts.String.cborType() + if err != nil { + return nil, err + } + if !opts.FieldName.valid() { + return nil, errors.New("cbor: invalid FieldName " + strconv.Itoa(int(opts.FieldName))) + } + byteSliceLaterEncodingTag, err := opts.ByteSliceLaterFormat.encodingTag() + if err != nil { + return nil, err + } + if !opts.ByteArray.valid() { + return nil, errors.New("cbor: invalid ByteArray " + strconv.Itoa(int(opts.ByteArray))) + } + if !opts.BinaryMarshaler.valid() { + return nil, errors.New("cbor: invalid BinaryMarshaler " + strconv.Itoa(int(opts.BinaryMarshaler))) + } + em := encMode{ + sort: opts.Sort, + shortestFloat: opts.ShortestFloat, + nanConvert: opts.NaNConvert, + infConvert: opts.InfConvert, + bigIntConvert: opts.BigIntConvert, + time: opts.Time, + timeTag: opts.TimeTag, + indefLength: opts.IndefLength, + nilContainers: opts.NilContainers, + tagsMd: opts.TagsMd, + omitEmpty: opts.OmitEmpty, + stringType: opts.String, + stringMajorType: stringMajorType, + fieldName: opts.FieldName, + byteSliceLaterFormat: opts.ByteSliceLaterFormat, + byteSliceLaterEncodingTag: byteSliceLaterEncodingTag, + byteArray: opts.ByteArray, + binaryMarshaler: opts.BinaryMarshaler, + } + return &em, nil +} + +// EncMode is the main interface for CBOR encoding. 
+type EncMode interface { + Marshal(v interface{}) ([]byte, error) + NewEncoder(w io.Writer) *Encoder + EncOptions() EncOptions +} + +// UserBufferEncMode is an interface for CBOR encoding, which extends EncMode by +// adding MarshalToBuffer to support user specified buffer rather than encoding +// into the built-in buffer pool. +type UserBufferEncMode interface { + EncMode + MarshalToBuffer(v interface{}, buf *bytes.Buffer) error + + // This private method is to prevent users implementing + // this interface and so future additions to it will + // not be breaking changes. + // See https://go.dev/blog/module-compatibility + unexport() +} + +type encMode struct { + tags tagProvider + sort SortMode + shortestFloat ShortestFloatMode + nanConvert NaNConvertMode + infConvert InfConvertMode + bigIntConvert BigIntConvertMode + time TimeMode + timeTag EncTagMode + indefLength IndefLengthMode + nilContainers NilContainersMode + tagsMd TagsMode + omitEmpty OmitEmptyMode + stringType StringMode + stringMajorType cborType + fieldName FieldNameMode + byteSliceLaterFormat ByteSliceLaterFormatMode + byteSliceLaterEncodingTag uint64 + byteArray ByteArrayMode + binaryMarshaler BinaryMarshalerMode +} + +var defaultEncMode, _ = EncOptions{}.encMode() + +// These four decoding modes are used by getMarshalerDecMode. +// maxNestedLevels, maxArrayElements, and maxMapPairs are +// set to max allowed limits to avoid rejecting Marshaler +// output that would have been the allowable output of a +// non-Marshaler object that exceeds default limits. +var ( + marshalerForbidIndefLengthForbidTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthForbidden, + tagsMd: TagsForbidden, + } + + marshalerAllowIndefLengthForbidTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthAllowed, + tagsMd: TagsForbidden, + } + + marshalerForbidIndefLengthAllowTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthForbidden, + tagsMd: TagsAllowed, + } + + marshalerAllowIndefLengthAllowTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthAllowed, + tagsMd: TagsAllowed, + } +) + +// getMarshalerDecMode returns one of four existing decoding modes +// which can be reused (safe for parallel use) for the purpose of +// checking if data returned by Marshaler is well-formed. +func getMarshalerDecMode(indefLength IndefLengthMode, tagsMd TagsMode) *decMode { + switch { + case indefLength == IndefLengthAllowed && tagsMd == TagsAllowed: + return &marshalerAllowIndefLengthAllowTagsDecMode + + case indefLength == IndefLengthAllowed && tagsMd == TagsForbidden: + return &marshalerAllowIndefLengthForbidTagsDecMode + + case indefLength == IndefLengthForbidden && tagsMd == TagsAllowed: + return &marshalerForbidIndefLengthAllowTagsDecMode + + case indefLength == IndefLengthForbidden && tagsMd == TagsForbidden: + return &marshalerForbidIndefLengthForbidTagsDecMode + + default: + // This should never happen, unless we add new options to + // IndefLengthMode or TagsMode without updating this function. 
+ return &decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: indefLength, + tagsMd: tagsMd, + } + } +} + +// EncOptions returns user specified options used to create this EncMode. +func (em *encMode) EncOptions() EncOptions { + return EncOptions{ + Sort: em.sort, + ShortestFloat: em.shortestFloat, + NaNConvert: em.nanConvert, + InfConvert: em.infConvert, + BigIntConvert: em.bigIntConvert, + Time: em.time, + TimeTag: em.timeTag, + IndefLength: em.indefLength, + NilContainers: em.nilContainers, + TagsMd: em.tagsMd, + OmitEmpty: em.omitEmpty, + String: em.stringType, + FieldName: em.fieldName, + ByteSliceLaterFormat: em.byteSliceLaterFormat, + ByteArray: em.byteArray, + BinaryMarshaler: em.binaryMarshaler, + } +} + +func (em *encMode) unexport() {} + +func (em *encMode) encTagBytes(t reflect.Type) []byte { + if em.tags != nil { + if tagItem := em.tags.getTagItemFromType(t); tagItem != nil { + return tagItem.cborTagNum + } + } + return nil +} + +// Marshal returns the CBOR encoding of v using em encoding mode. +// +// See the documentation for Marshal for details. +func (em *encMode) Marshal(v interface{}) ([]byte, error) { + e := getEncodeBuffer() + + if err := encode(e, em, reflect.ValueOf(v)); err != nil { + putEncodeBuffer(e) + return nil, err + } + + buf := make([]byte, e.Len()) + copy(buf, e.Bytes()) + + putEncodeBuffer(e) + return buf, nil +} + +// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool) +// and uses em encoding mode. +// +// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain +// partially encoded data if error is returned. +// +// See Marshal for more details. +func (em *encMode) MarshalToBuffer(v interface{}, buf *bytes.Buffer) error { + if buf == nil { + return fmt.Errorf("cbor: encoding buffer provided by user is nil") + } + return encode(buf, em, reflect.ValueOf(v)) +} + +// NewEncoder returns a new encoder that writes to w using em EncMode. +func (em *encMode) NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w, em: em} +} + +// encodeBufferPool caches unused bytes.Buffer objects for later reuse. 
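+// A sketch of the intended get/put pattern (as in encMode.Marshal above):
+// get a buffer, encode into it, copy the result out, then return the buffer
+// (putEncodeBuffer resets it before pooling):
+//
+//	e := getEncodeBuffer()
+//	// ... encode into e and copy e.Bytes() out ...
+//	putEncodeBuffer(e)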
+var encodeBufferPool = sync.Pool{ + New: func() interface{} { + e := new(bytes.Buffer) + e.Grow(32) // TODO: make this configurable + return e + }, +} + +func getEncodeBuffer() *bytes.Buffer { + return encodeBufferPool.Get().(*bytes.Buffer) +} + +func putEncodeBuffer(e *bytes.Buffer) { + e.Reset() + encodeBufferPool.Put(e) +} + +type encodeFunc func(e *bytes.Buffer, em *encMode, v reflect.Value) error +type isEmptyFunc func(em *encMode, v reflect.Value) (empty bool, err error) + +func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if !v.IsValid() { + // v is zero value + e.Write(cborNil) + return nil + } + vt := v.Type() + f, _ := getEncodeFunc(vt) + if f == nil { + return &UnsupportedTypeError{vt} + } + + return f(e, em, v) +} + +func encodeBool(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + b := cborFalse + if v.Bool() { + b = cborTrue + } + e.Write(b) + return nil +} + +func encodeInt(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + i := v.Int() + if i >= 0 { + encodeHead(e, byte(cborTypePositiveInt), uint64(i)) + return nil + } + i = i*(-1) - 1 + encodeHead(e, byte(cborTypeNegativeInt), uint64(i)) + return nil +} + +func encodeUint(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + encodeHead(e, byte(cborTypePositiveInt), v.Uint()) + return nil +} + +func encodeFloat(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + f64 := v.Float() + if math.IsNaN(f64) { + return encodeNaN(e, em, v) + } + if math.IsInf(f64, 0) { + return encodeInf(e, em, v) + } + fopt := em.shortestFloat + if v.Kind() == reflect.Float64 && (fopt == ShortestFloatNone || cannotFitFloat32(f64)) { + // Encode float64 + // Don't use encodeFloat64() because it cannot be inlined. + const argumentSize = 8 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | byte(additionalInformationAsFloat64) + binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64)) + e.Write(scratch[:]) + return nil + } + + f32 := float32(f64) + if fopt == ShortestFloat16 { + var f16 float16.Float16 + p := float16.PrecisionFromfloat32(f32) + if p == float16.PrecisionExact { + // Roundtrip float32->float16->float32 test isn't needed. + f16 = float16.Fromfloat32(f32) + } else if p == float16.PrecisionUnknown { + // Try roundtrip float32->float16->float32 to determine if float32 can fit into float16. + f16 = float16.Fromfloat32(f32) + if f16.Float32() == f32 { + p = float16.PrecisionExact + } + } + if p == float16.PrecisionExact { + // Encode float16 + // Don't use encodeFloat16() because it cannot be inlined. + const argumentSize = 2 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16 + binary.BigEndian.PutUint16(scratch[1:], uint16(f16)) + e.Write(scratch[:]) + return nil + } + } + + // Encode float32 + // Don't use encodeFloat32() because it cannot be inlined. 
+ const argumentSize = 4 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32 + binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32)) + e.Write(scratch[:]) + return nil +} + +func encodeInf(e *bytes.Buffer, em *encMode, v reflect.Value) error { + f64 := v.Float() + switch em.infConvert { + case InfConvertReject: + return &UnsupportedValueError{msg: "floating-point infinity"} + + case InfConvertFloat16: + if f64 > 0 { + e.Write(cborPositiveInfinity) + } else { + e.Write(cborNegativeInfinity) + } + return nil + } + if v.Kind() == reflect.Float64 { + return encodeFloat64(e, f64) + } + return encodeFloat32(e, float32(f64)) +} + +func encodeNaN(e *bytes.Buffer, em *encMode, v reflect.Value) error { + switch em.nanConvert { + case NaNConvert7e00: + e.Write(cborNaN) + return nil + + case NaNConvertNone: + if v.Kind() == reflect.Float64 { + return encodeFloat64(e, v.Float()) + } + f32 := float32NaNFromReflectValue(v) + return encodeFloat32(e, f32) + + case NaNConvertReject: + return &UnsupportedValueError{msg: "floating-point NaN"} + + default: // NaNConvertPreserveSignal, NaNConvertQuiet + if v.Kind() == reflect.Float64 { + f64 := v.Float() + f64bits := math.Float64bits(f64) + if em.nanConvert == NaNConvertQuiet && f64bits&(1<<51) == 0 { + f64bits |= 1 << 51 // Set quiet bit = 1 + f64 = math.Float64frombits(f64bits) + } + // The lower 29 bits are dropped when converting from float64 to float32. + if f64bits&0x1fffffff != 0 { + // Encode NaN as float64 because dropped coef bits from float64 to float32 are not all 0s. + return encodeFloat64(e, f64) + } + // Create float32 from float64 manually because float32(f64) always turns on NaN's quiet bits. + sign := uint32(f64bits>>32) & (1 << 31) + exp := uint32(0x7f800000) + coef := uint32((f64bits & 0xfffffffffffff) >> 29) + f32bits := sign | exp | coef + f32 := math.Float32frombits(f32bits) + // The lower 13 bits are dropped when converting from float32 to float16. + if f32bits&0x1fff != 0 { + // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s. + return encodeFloat32(e, f32) + } + // Encode NaN as float16 + f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN. + return encodeFloat16(e, f16) + } + + f32 := float32NaNFromReflectValue(v) + f32bits := math.Float32bits(f32) + if em.nanConvert == NaNConvertQuiet && f32bits&(1<<22) == 0 { + f32bits |= 1 << 22 // Set quiet bit = 1 + f32 = math.Float32frombits(f32bits) + } + // The lower 13 bits are dropped coef bits when converting from float32 to float16. + if f32bits&0x1fff != 0 { + // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s. + return encodeFloat32(e, f32) + } + f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN. 
+ return encodeFloat16(e, f16) + } +} + +func encodeFloat16(e *bytes.Buffer, f16 float16.Float16) error { + const argumentSize = 2 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16 + binary.BigEndian.PutUint16(scratch[1:], uint16(f16)) + e.Write(scratch[:]) + return nil +} + +func encodeFloat32(e *bytes.Buffer, f32 float32) error { + const argumentSize = 4 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32 + binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32)) + e.Write(scratch[:]) + return nil +} + +func encodeFloat64(e *bytes.Buffer, f64 float64) error { + const argumentSize = 8 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat64 + binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64)) + e.Write(scratch[:]) + return nil +} + +func encodeByteString(e *bytes.Buffer, em *encMode, v reflect.Value) error { + vk := v.Kind() + if vk == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if vk == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && em.byteSliceLaterEncodingTag != 0 { + encodeHead(e, byte(cborTypeTag), em.byteSliceLaterEncodingTag) + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + slen := v.Len() + if slen == 0 { + return e.WriteByte(byte(cborTypeByteString)) + } + encodeHead(e, byte(cborTypeByteString), uint64(slen)) + if vk == reflect.Array { + for i := 0; i < slen; i++ { + e.WriteByte(byte(v.Index(i).Uint())) + } + return nil + } + e.Write(v.Bytes()) + return nil +} + +func encodeString(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + s := v.String() + encodeHead(e, byte(em.stringMajorType), uint64(len(s))) + e.WriteString(s) + return nil +} + +type arrayEncodeFunc struct { + f encodeFunc +} + +func (ae arrayEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.byteArray == ByteArrayToByteSlice && v.Type().Elem().Kind() == reflect.Uint8 { + return encodeByteString(e, em, v) + } + if v.Kind() == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + alen := v.Len() + if alen == 0 { + return e.WriteByte(byte(cborTypeArray)) + } + encodeHead(e, byte(cborTypeArray), uint64(alen)) + for i := 0; i < alen; i++ { + if err := ae.f(e, em, v.Index(i)); err != nil { + return err + } + } + return nil +} + +// encodeKeyValueFunc encodes key/value pairs in map (v). +// If kvs is provided (having the same length as v), length of encoded key and value are stored in kvs. +// kvs is used for canonical encoding of map. 
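+// Each keyValue entry records byte offsets of one encoded pair (key start,
+// value start, pair end) relative to the first pair, so the sorters below can
+// compare encoded keys in place without re-encoding them.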
+type encodeKeyValueFunc func(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error + +type mapEncodeFunc struct { + e encodeKeyValueFunc +} + +func (me mapEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + mlen := v.Len() + if mlen == 0 { + return e.WriteByte(byte(cborTypeMap)) + } + + encodeHead(e, byte(cborTypeMap), uint64(mlen)) + if em.sort == SortNone || em.sort == SortFastShuffle || mlen <= 1 { + return me.e(e, em, v, nil) + } + + kvsp := getKeyValues(v.Len()) // for sorting keys + defer putKeyValues(kvsp) + kvs := *kvsp + + kvBeginOffset := e.Len() + if err := me.e(e, em, v, kvs); err != nil { + return err + } + kvTotalLen := e.Len() - kvBeginOffset + + // Use the capacity at the tail of the encode buffer as a staging area to rearrange the + // encoded pairs into sorted order. + e.Grow(kvTotalLen) + tmp := e.Bytes()[e.Len() : e.Len()+kvTotalLen] // Can use e.AvailableBuffer() in Go 1.21+. + dst := e.Bytes()[kvBeginOffset:] + + if em.sort == SortBytewiseLexical { + sort.Sort(&bytewiseKeyValueSorter{kvs: kvs, data: dst}) + } else { + sort.Sort(&lengthFirstKeyValueSorter{kvs: kvs, data: dst}) + } + + // This is where the encoded bytes are actually rearranged in the output buffer to reflect + // the desired order. + sortedOffset := 0 + for _, kv := range kvs { + copy(tmp[sortedOffset:], dst[kv.offset:kv.nextOffset]) + sortedOffset += kv.nextOffset - kv.offset + } + copy(dst, tmp[:kvTotalLen]) + + return nil + +} + +// keyValue is the position of an encoded pair in a buffer. All offsets are zero-based and relative +// to the first byte of the first encoded pair. +type keyValue struct { + offset int + valueOffset int + nextOffset int +} + +type bytewiseKeyValueSorter struct { + kvs []keyValue + data []byte +} + +func (x *bytewiseKeyValueSorter) Len() int { + return len(x.kvs) +} + +func (x *bytewiseKeyValueSorter) Swap(i, j int) { + x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i] +} + +func (x *bytewiseKeyValueSorter) Less(i, j int) bool { + kvi, kvj := x.kvs[i], x.kvs[j] + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 +} + +type lengthFirstKeyValueSorter struct { + kvs []keyValue + data []byte +} + +func (x *lengthFirstKeyValueSorter) Len() int { + return len(x.kvs) +} + +func (x *lengthFirstKeyValueSorter) Swap(i, j int) { + x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i] +} + +func (x *lengthFirstKeyValueSorter) Less(i, j int) bool { + kvi, kvj := x.kvs[i], x.kvs[j] + if keyLengthDifference := (kvi.valueOffset - kvi.offset) - (kvj.valueOffset - kvj.offset); keyLengthDifference != 0 { + return keyLengthDifference < 0 + } + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 +} + +var keyValuePool = sync.Pool{} + +func getKeyValues(length int) *[]keyValue { + v := keyValuePool.Get() + if v == nil { + y := make([]keyValue, length) + return &y + } + x := v.(*[]keyValue) + if cap(*x) >= length { + *x = (*x)[:length] + return x + } + // []keyValue from the pool does not have enough capacity. + // Return it back to the pool and create a new one. 
+ keyValuePool.Put(x) + y := make([]keyValue, length) + return &y +} + +func putKeyValues(x *[]keyValue) { + *x = (*x)[:0] + keyValuePool.Put(x) +} + +func encodeStructToArray(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return err + } + + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + + flds := structType.fields + + encodeHead(e, byte(cborTypeArray), uint64(len(flds))) + for i := 0; i < len(flds); i++ { + f := flds[i] + + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Write CBOR nil for null pointer to embedded struct + e.Write(cborNil) + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + + if err := f.ef(e, em, fv); err != nil { + return err + } + } + return nil +} + +func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return err + } + + flds := structType.getFields(em) + + start := 0 + if em.sort == SortFastShuffle && len(flds) > 0 { + start = rand.Intn(len(flds)) //nolint:gosec // Don't need a CSPRNG for deck cutting. + } + + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + + // Encode head with struct field count. + // Head is rewritten later if actual encoded field count is different from struct field count. + encodedHeadLen := encodeHead(e, byte(cborTypeMap), uint64(len(flds))) + + kvbegin := e.Len() + kvcount := 0 + for offset := 0; offset < len(flds); offset++ { + f := flds[(start+offset)%len(flds)] + + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Skip null pointer to embedded struct + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + if f.omitEmpty { + empty, err := f.ief(em, fv) + if err != nil { + return err + } + if empty { + continue + } + } + + if !f.keyAsInt && em.fieldName == FieldNameToByteString { + e.Write(f.cborNameByteString) + } else { // int or text string + e.Write(f.cborName) + } + + if err := f.ef(e, em, fv); err != nil { + return err + } + + kvcount++ + } + + if len(flds) == kvcount { + // Encoded element count in head is the same as actual element count. + return nil + } + + // Overwrite the bytes that were reserved for the head before encoding the map entries. + var actualHeadLen int + { + headbuf := *bytes.NewBuffer(e.Bytes()[kvbegin-encodedHeadLen : kvbegin-encodedHeadLen : kvbegin]) + actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvcount)) + } + + if actualHeadLen == encodedHeadLen { + // The bytes reserved for the encoded head were exactly the right size, so the + // encoded entries are already in their final positions. + return nil + } + + // We reserved more bytes than needed for the encoded head, based on the number of fields + // encoded. The encoded entries are offset to the right by the number of excess reserved + // bytes. Shift the entries left to remove the gap. 
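+	// For example, a head reserved for 300 fields occupies 3 bytes (0xb9 0x01 0x2c);
+	// if only 20 fields were actually encoded, the rewritten head is 1 byte (0xb4),
+	// so the entries shift left by 2 bytes.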
+ excessReservedBytes := encodedHeadLen - actualHeadLen + dst := e.Bytes()[kvbegin-excessReservedBytes : e.Len()-excessReservedBytes] + src := e.Bytes()[kvbegin:e.Len()] + copy(dst, src) + + // After shifting, the excess bytes are at the end of the output buffer and they are + // garbage. + e.Truncate(e.Len() - excessReservedBytes) + return nil +} + +func encodeIntf(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if v.IsNil() { + e.Write(cborNil) + return nil + } + return encode(e, em, v.Elem()) +} + +func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { + t := v.Interface().(time.Time) + if t.IsZero() { + e.Write(cborNil) // Even if tag is required, encode as CBOR null. + return nil + } + if em.timeTag == EncTagRequired { + tagNumber := 1 + if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano { + tagNumber = 0 + } + encodeHead(e, byte(cborTypeTag), uint64(tagNumber)) + } + switch em.time { + case TimeUnix: + secs := t.Unix() + return encodeInt(e, em, reflect.ValueOf(secs)) + + case TimeUnixMicro: + t = t.UTC().Round(time.Microsecond) + f := float64(t.UnixNano()) / 1e9 + return encodeFloat(e, em, reflect.ValueOf(f)) + + case TimeUnixDynamic: + t = t.UTC().Round(time.Microsecond) + secs, nsecs := t.Unix(), uint64(t.Nanosecond()) + if nsecs == 0 { + return encodeInt(e, em, reflect.ValueOf(secs)) + } + f := float64(secs) + float64(nsecs)/1e9 + return encodeFloat(e, em, reflect.ValueOf(f)) + + case TimeRFC3339: + s := t.Format(time.RFC3339) + return encodeString(e, em, reflect.ValueOf(s)) + + default: // TimeRFC3339Nano + s := t.Format(time.RFC3339Nano) + return encodeString(e, em, reflect.ValueOf(s)) + } +} + +func encodeBigInt(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.bigIntConvert == BigIntConvertReject { + return &UnsupportedTypeError{Type: typeBigInt} + } + + vbi := v.Interface().(big.Int) + sign := vbi.Sign() + bi := new(big.Int).SetBytes(vbi.Bytes()) // bi is absolute value of v + if sign < 0 { + // For negative number, convert to CBOR encoded number (-v-1). 
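+		// e.g. for v = -500, bi holds 500 here and becomes 499 below; under
+		// BigIntConvertShortest it encodes as 0x3901f3 (major type 1, argument 499).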
+ bi.Sub(bi, big.NewInt(1)) + } + + if em.bigIntConvert == BigIntConvertShortest { + if bi.IsUint64() { + if sign >= 0 { + // Encode as CBOR pos int (major type 0) + encodeHead(e, byte(cborTypePositiveInt), bi.Uint64()) + return nil + } + // Encode as CBOR neg int (major type 1) + encodeHead(e, byte(cborTypeNegativeInt), bi.Uint64()) + return nil + } + } + + tagNum := 2 + if sign < 0 { + tagNum = 3 + } + // Write tag number + encodeHead(e, byte(cborTypeTag), uint64(tagNum)) + // Write bignum byte string + b := bi.Bytes() + encodeHead(e, byte(cborTypeByteString), uint64(len(b))) + e.Write(b) + return nil +} + +type binaryMarshalerEncoder struct { + alternateEncode encodeFunc + alternateIsEmpty isEmptyFunc +} + +func (bme binaryMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.binaryMarshaler != BinaryMarshalerByteString { + return bme.alternateEncode(e, em, v) + } + + vt := v.Type() + m, ok := v.Interface().(encoding.BinaryMarshaler) + if !ok { + pv := reflect.New(vt) + pv.Elem().Set(v) + m = pv.Interface().(encoding.BinaryMarshaler) + } + data, err := m.MarshalBinary() + if err != nil { + return err + } + if b := em.encTagBytes(vt); b != nil { + e.Write(b) + } + encodeHead(e, byte(cborTypeByteString), uint64(len(data))) + e.Write(data) + return nil +} + +func (bme binaryMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) { + if em.binaryMarshaler != BinaryMarshalerByteString { + return bme.alternateIsEmpty(em, v) + } + + m, ok := v.Interface().(encoding.BinaryMarshaler) + if !ok { + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + m = pv.Interface().(encoding.BinaryMarshaler) + } + data, err := m.MarshalBinary() + if err != nil { + return false, err + } + return len(data) == 0, nil +} + +func encodeMarshalerType(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.tagsMd == TagsForbidden && v.Type() == typeRawTag { + return errors.New("cbor: cannot encode cbor.RawTag when TagsMd is TagsForbidden") + } + m, ok := v.Interface().(Marshaler) + if !ok { + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + m = pv.Interface().(Marshaler) + } + data, err := m.MarshalCBOR() + if err != nil { + return err + } + + // Verify returned CBOR data item from MarshalCBOR() is well-formed and passes tag validity for builtin tags 0-3. + d := decoder{data: data, dm: getMarshalerDecMode(em.indefLength, em.tagsMd)} + err = d.wellformed(false, true) + if err != nil { + return &MarshalerError{typ: v.Type(), err: err} + } + + e.Write(data) + return nil +} + +func encodeTag(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.tagsMd == TagsForbidden { + return errors.New("cbor: cannot encode cbor.Tag when TagsMd is TagsForbidden") + } + + t := v.Interface().(Tag) + + if t.Number == 0 && t.Content == nil { + // Marshal uninitialized cbor.Tag + e.Write(cborNil) + return nil + } + + // Marshal tag number + encodeHead(e, byte(cborTypeTag), t.Number) + + vem := *em // shallow copy + + // For built-in tags, disable settings that may introduce tag validity errors when + // marshaling certain Content values. + switch t.Number { + case tagNumRFC3339Time: + vem.stringType = StringToTextString + vem.stringMajorType = cborTypeTextString + case tagNumUnsignedBignum, tagNumNegativeBignum: + vem.byteSliceLaterFormat = ByteSliceLaterFormatNone + vem.byteSliceLaterEncodingTag = 0 + } + + // Marshal tag content + return encode(e, &vem, reflect.ValueOf(t.Content)) +} + +// encodeHead writes CBOR head of specified type t and returns number of bytes written. 
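+//
+// For example, encodeHead(e, byte(cborTypeByteString), 500) writes the 3-byte
+// head 0x59 0x01 0xf4: major type 2 with a 2-byte argument holding 500.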
+func encodeHead(e *bytes.Buffer, t byte, n uint64) int { + if n <= maxAdditionalInformationWithoutArgument { + const headSize = 1 + e.WriteByte(t | byte(n)) + return headSize + } + + if n <= math.MaxUint8 { + const headSize = 2 + scratch := [headSize]byte{ + t | byte(additionalInformationWith1ByteArgument), + byte(n), + } + e.Write(scratch[:]) + return headSize + } + + if n <= math.MaxUint16 { + const headSize = 3 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith2ByteArgument) + binary.BigEndian.PutUint16(scratch[1:], uint16(n)) + e.Write(scratch[:]) + return headSize + } + + if n <= math.MaxUint32 { + const headSize = 5 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith4ByteArgument) + binary.BigEndian.PutUint32(scratch[1:], uint32(n)) + e.Write(scratch[:]) + return headSize + } + + const headSize = 9 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith8ByteArgument) + binary.BigEndian.PutUint64(scratch[1:], n) + e.Write(scratch[:]) + return headSize +} + +var ( + typeMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() + typeBinaryMarshaler = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + typeRawMessage = reflect.TypeOf(RawMessage(nil)) + typeByteString = reflect.TypeOf(ByteString("")) +) + +func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) { + k := t.Kind() + if k == reflect.Ptr { + return getEncodeIndirectValueFunc(t), isEmptyPtr + } + switch t { + case typeSimpleValue: + return encodeMarshalerType, isEmptyUint + + case typeTag: + return encodeTag, alwaysNotEmpty + + case typeTime: + return encodeTime, alwaysNotEmpty + + case typeBigInt: + return encodeBigInt, alwaysNotEmpty + + case typeRawMessage: + return encodeMarshalerType, isEmptySlice + + case typeByteString: + return encodeMarshalerType, isEmptyString + } + if reflect.PtrTo(t).Implements(typeMarshaler) { + return encodeMarshalerType, alwaysNotEmpty + } + if reflect.PtrTo(t).Implements(typeBinaryMarshaler) { + defer func() { + // capture encoding method used for modes that disable BinaryMarshaler + bme := binaryMarshalerEncoder{ + alternateEncode: ef, + alternateIsEmpty: ief, + } + ef = bme.encode + ief = bme.isEmpty + }() + } + switch k { + case reflect.Bool: + return encodeBool, isEmptyBool + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return encodeInt, isEmptyInt + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return encodeUint, isEmptyUint + + case reflect.Float32, reflect.Float64: + return encodeFloat, isEmptyFloat + + case reflect.String: + return encodeString, isEmptyString + + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return encodeByteString, isEmptySlice + } + fallthrough + + case reflect.Array: + f, _ := getEncodeFunc(t.Elem()) + if f == nil { + return nil, nil + } + return arrayEncodeFunc{f: f}.encode, isEmptySlice + + case reflect.Map: + f := getEncodeMapFunc(t) + if f == nil { + return nil, nil + } + return f, isEmptyMap + + case reflect.Struct: + // Get struct's special field "_" tag options + if f, ok := t.FieldByName("_"); ok { + tag := f.Tag.Get("cbor") + if tag != "-" { + if hasToArrayOption(tag) { + return encodeStructToArray, isEmptyStruct + } + } + } + return encodeStruct, isEmptyStruct + + case reflect.Interface: + return encodeIntf, isEmptyIntf + } + return nil, nil +} + +func getEncodeIndirectValueFunc(t reflect.Type) encodeFunc { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + f, _ := 
getEncodeFunc(t) + if f == nil { + return nil + } + return func(e *bytes.Buffer, em *encMode, v reflect.Value) error { + for v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + if v.Kind() == reflect.Ptr && v.IsNil() { + e.Write(cborNil) + return nil + } + return f(e, em, v) + } +} + +func alwaysNotEmpty(_ *encMode, _ reflect.Value) (empty bool, err error) { + return false, nil +} + +func isEmptyBool(_ *encMode, v reflect.Value) (bool, error) { + return !v.Bool(), nil +} + +func isEmptyInt(_ *encMode, v reflect.Value) (bool, error) { + return v.Int() == 0, nil +} + +func isEmptyUint(_ *encMode, v reflect.Value) (bool, error) { + return v.Uint() == 0, nil +} + +func isEmptyFloat(_ *encMode, v reflect.Value) (bool, error) { + return v.Float() == 0.0, nil +} + +func isEmptyString(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptySlice(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptyMap(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptyPtr(_ *encMode, v reflect.Value) (bool, error) { + return v.IsNil(), nil +} + +func isEmptyIntf(_ *encMode, v reflect.Value) (bool, error) { + return v.IsNil(), nil +} + +func isEmptyStruct(em *encMode, v reflect.Value) (bool, error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return false, err + } + + if em.omitEmpty == OmitEmptyGoValue { + return false, nil + } + + if structType.toArray { + return len(structType.fields) == 0, nil + } + + if len(structType.fields) > len(structType.omitEmptyFieldsIdx) { + return false, nil + } + + for _, i := range structType.omitEmptyFieldsIdx { + f := structType.fields[i] + + // Get field value + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Skip null pointer to embedded struct + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + + empty, err := f.ief(em, fv) + if err != nil { + return false, err + } + if !empty { + return false, nil + } + } + return true, nil +} + +func cannotFitFloat32(f64 float64) bool { + f32 := float32(f64) + return float64(f32) != f64 +} + +// float32NaNFromReflectValue extracts float32 NaN from reflect.Value while preserving NaN's quiet bit. +func float32NaNFromReflectValue(v reflect.Value) float32 { + // Keith Randall's workaround for issue https://github.com/golang/go/issues/36400 + p := reflect.New(v.Type()) + p.Elem().Set(v) + f32 := p.Convert(reflect.TypeOf((*float32)(nil))).Elem().Interface().(float32) + return f32 +} diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/encode_map.go b/go-controller/vendor/github.com/fxamacker/cbor/v2/encode_map.go new file mode 100644 index 0000000000..8b4b4bbc59 --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/encode_map.go @@ -0,0 +1,94 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +//go:build go1.20 + +package cbor + +import ( + "bytes" + "reflect" + "sync" +) + +type mapKeyValueEncodeFunc struct { + kf, ef encodeFunc + kpool, vpool sync.Pool +} + +func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error { + iterk := me.kpool.Get().(*reflect.Value) + defer func() { + iterk.SetZero() + me.kpool.Put(iterk) + }() + iterv := me.vpool.Get().(*reflect.Value) + defer func() { + iterv.SetZero() + me.vpool.Put(iterv) + }() + + if kvs == nil { + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + iterk.SetIterKey(iter) + iterv.SetIterValue(iter) + + if err := me.kf(e, em, *iterk); err != nil { + return err + } + if err := me.ef(e, em, *iterv); err != nil { + return err + } + } + return nil + } + + initial := e.Len() + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + iterk.SetIterKey(iter) + iterv.SetIterValue(iter) + + offset := e.Len() + if err := me.kf(e, em, *iterk); err != nil { + return err + } + valueOffset := e.Len() + if err := me.ef(e, em, *iterv); err != nil { + return err + } + kvs[i] = keyValue{ + offset: offset - initial, + valueOffset: valueOffset - initial, + nextOffset: e.Len() - initial, + } + } + + return nil +} + +func getEncodeMapFunc(t reflect.Type) encodeFunc { + kf, _ := getEncodeFunc(t.Key()) + ef, _ := getEncodeFunc(t.Elem()) + if kf == nil || ef == nil { + return nil + } + mkv := &mapKeyValueEncodeFunc{ + kf: kf, + ef: ef, + kpool: sync.Pool{ + New: func() interface{} { + rk := reflect.New(t.Key()).Elem() + return &rk + }, + }, + vpool: sync.Pool{ + New: func() interface{} { + rv := reflect.New(t.Elem()).Elem() + return &rv + }, + }, + } + return mapEncodeFunc{ + e: mkv.encodeKeyValues, + }.encode +} diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go b/go-controller/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go new file mode 100644 index 0000000000..31c39336dd --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go @@ -0,0 +1,60 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +//go:build !go1.20 + +package cbor + +import ( + "bytes" + "reflect" +) + +type mapKeyValueEncodeFunc struct { + kf, ef encodeFunc +} + +func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error { + if kvs == nil { + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + if err := me.kf(e, em, iter.Key()); err != nil { + return err + } + if err := me.ef(e, em, iter.Value()); err != nil { + return err + } + } + return nil + } + + initial := e.Len() + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + offset := e.Len() + if err := me.kf(e, em, iter.Key()); err != nil { + return err + } + valueOffset := e.Len() + if err := me.ef(e, em, iter.Value()); err != nil { + return err + } + kvs[i] = keyValue{ + offset: offset - initial, + valueOffset: valueOffset - initial, + nextOffset: e.Len() - initial, + } + } + + return nil +} + +func getEncodeMapFunc(t reflect.Type) encodeFunc { + kf, _ := getEncodeFunc(t.Key()) + ef, _ := getEncodeFunc(t.Elem()) + if kf == nil || ef == nil { + return nil + } + mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef} + return mapEncodeFunc{ + e: mkv.encodeKeyValues, + }.encode +} diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/simplevalue.go b/go-controller/vendor/github.com/fxamacker/cbor/v2/simplevalue.go new file mode 100644 index 0000000000..de175cee4a --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/simplevalue.go @@ -0,0 +1,69 @@ +package cbor + +import ( + "errors" + "fmt" + "reflect" +) + +// SimpleValue represents CBOR simple value. +// CBOR simple value is: +// - an extension point like CBOR tag. +// - a subset of CBOR major type 7 that isn't floating-point. +// - "identified by a number between 0 and 255, but distinct from that number itself". +// For example, "a simple value 2 is not equivalent to an integer 2" as a CBOR map key. +// +// CBOR simple values identified by 20..23 are: "false", "true" , "null", and "undefined". +// Other CBOR simple values are currently unassigned/reserved by IANA. +type SimpleValue uint8 + +var ( + typeSimpleValue = reflect.TypeOf(SimpleValue(0)) +) + +// MarshalCBOR encodes SimpleValue as CBOR simple value (major type 7). +func (sv SimpleValue) MarshalCBOR() ([]byte, error) { + // RFC 8949 3.3. Floating-Point Numbers and Values with No Content says: + // "An encoder MUST NOT issue two-byte sequences that start with 0xf8 + // (major type 7, additional information 24) and continue with a byte + // less than 0x20 (32 decimal). Such sequences are not well-formed. + // (This implies that an encoder cannot encode false, true, null, or + // undefined in two-byte sequences and that only the one-byte variants + // of these are well-formed; more generally speaking, each simple value + // only has a single representation variant)." + + switch { + case sv <= maxSimpleValueInAdditionalInformation: + return []byte{byte(cborTypePrimitives) | byte(sv)}, nil + + case sv >= minSimpleValueIn1ByteArgument: + return []byte{byte(cborTypePrimitives) | additionalInformationWith1ByteArgument, byte(sv)}, nil + + default: + return nil, &UnsupportedValueError{msg: fmt.Sprintf("SimpleValue(%d)", sv)} + } +} + +// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue. 
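To make the MarshalCBOR rule above concrete: simple values 0..23 fit in the initial byte, 24..31 have no well-formed encoding at all, and 32..255 must use the two-byte 0xf8 form. A short usage sketch against the package's exported API (expected bytes per RFC 8949):

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// 20 is the simple value for "false" and encodes in one byte.
	b, _ := cbor.Marshal(cbor.SimpleValue(20))
	fmt.Printf("% x\n", b) // f4

	// 100 needs the two-byte 0xf8 form.
	b, _ = cbor.Marshal(cbor.SimpleValue(100))
	fmt.Printf("% x\n", b) // f8 64

	// 24..31 have no well-formed encoding, so marshaling fails.
	_, err := cbor.Marshal(cbor.SimpleValue(25))
	fmt.Println(err)
}
```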
+func (sv *SimpleValue) UnmarshalCBOR(data []byte) error { + if sv == nil { + return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer") + } + + d := decoder{data: data, dm: defaultDecMode} + + typ, ai, val := d.getHead() + + if typ != cborTypePrimitives { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue"} + } + if ai > additionalInformationWith1ByteArgument { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue", errorMsg: "not simple values"} + } + + // It is safe to cast val to uint8 here because + // - data is already verified to be well-formed CBOR simple value and + // - val is <= math.MaxUint8. + *sv = SimpleValue(val) + return nil +} diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/stream.go b/go-controller/vendor/github.com/fxamacker/cbor/v2/stream.go new file mode 100644 index 0000000000..507ab6c184 --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/stream.go @@ -0,0 +1,277 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "bytes" + "errors" + "io" + "reflect" +) + +// Decoder reads and decodes CBOR values from io.Reader. +type Decoder struct { + r io.Reader + d decoder + buf []byte + off int // next read offset in buf + bytesRead int +} + +// NewDecoder returns a new decoder that reads and decodes from r using +// the default decoding options. +func NewDecoder(r io.Reader) *Decoder { + return defaultDecMode.NewDecoder(r) +} + +// Decode reads CBOR value and decodes it into the value pointed to by v. +func (dec *Decoder) Decode(v interface{}) error { + _, err := dec.readNext() + if err != nil { + // Return validation error or read error. + return err + } + + dec.d.reset(dec.buf[dec.off:]) + err = dec.d.value(v) + + // Increment dec.off even if decoding err is not nil because + // dec.d.off points to the next CBOR data item if current + // CBOR data item is valid but failed to be decoded into v. + // This allows next CBOR data item to be decoded in next + // call to this function. + dec.off += dec.d.off + dec.bytesRead += dec.d.off + + return err +} + +// Skip skips to the next CBOR data item (if there is any), +// otherwise it returns error such as io.EOF, io.UnexpectedEOF, etc. +func (dec *Decoder) Skip() error { + n, err := dec.readNext() + if err != nil { + // Return validation error or read error. + return err + } + + dec.off += n + dec.bytesRead += n + return nil +} + +// NumBytesRead returns the number of bytes read. +func (dec *Decoder) NumBytesRead() int { + return dec.bytesRead +} + +// Buffered returns a reader for data remaining in Decoder's buffer. +// Returned reader is valid until the next call to Decode or Skip. +func (dec *Decoder) Buffered() io.Reader { + return bytes.NewReader(dec.buf[dec.off:]) +} + +// readNext() reads next CBOR data item from Reader to buffer. +// It returns the size of next CBOR data item. +// It also returns validation error or read error if any. +func (dec *Decoder) readNext() (int, error) { + var readErr error + var validErr error + + for { + // Process any unread data in dec.buf. 
+ if dec.off < len(dec.buf) { + dec.d.reset(dec.buf[dec.off:]) + off := dec.off // Save offset before data validation + validErr = dec.d.wellformed(true, false) + dec.off = off // Restore offset + + if validErr == nil { + return dec.d.off, nil + } + + if validErr != io.ErrUnexpectedEOF { + return 0, validErr + } + + // Process last read error on io.ErrUnexpectedEOF. + if readErr != nil { + if readErr == io.EOF { + // current CBOR data item is incomplete. + return 0, io.ErrUnexpectedEOF + } + return 0, readErr + } + } + + // More data is needed and there was no read error. + var n int + for n == 0 { + n, readErr = dec.read() + if n == 0 && readErr != nil { + // No more data can be read and read error is encountered. + // At this point, validErr is either nil or io.ErrUnexpectedEOF. + if readErr == io.EOF { + if validErr == io.ErrUnexpectedEOF { + // current CBOR data item is incomplete. + return 0, io.ErrUnexpectedEOF + } + } + return 0, readErr + } + } + + // At this point, dec.buf contains new data from last read (n > 0). + } +} + +// read() reads data from Reader to buffer. +// It returns number of bytes read and any read error encountered. +// Postconditions: +// - dec.buf contains previously unread data and new data. +// - dec.off is 0. +func (dec *Decoder) read() (int, error) { + // Grow buf if needed. + const minRead = 512 + if cap(dec.buf)-len(dec.buf)+dec.off < minRead { + oldUnreadBuf := dec.buf[dec.off:] + dec.buf = make([]byte, len(dec.buf)-dec.off, 2*cap(dec.buf)+minRead) + dec.overwriteBuf(oldUnreadBuf) + } + + // Copy unread data over read data and reset off to 0. + if dec.off > 0 { + dec.overwriteBuf(dec.buf[dec.off:]) + } + + // Read from reader and reslice buf. + n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)]) + dec.buf = dec.buf[0 : len(dec.buf)+n] + return n, err +} + +func (dec *Decoder) overwriteBuf(newBuf []byte) { + n := copy(dec.buf, newBuf) + dec.buf = dec.buf[:n] + dec.off = 0 +} + +// Encoder writes CBOR values to io.Writer. +type Encoder struct { + w io.Writer + em *encMode + indefTypes []cborType +} + +// NewEncoder returns a new encoder that writes to w using the default encoding options. +func NewEncoder(w io.Writer) *Encoder { + return defaultEncMode.NewEncoder(w) +} + +// Encode writes the CBOR encoding of v. +func (enc *Encoder) Encode(v interface{}) error { + if len(enc.indefTypes) > 0 && v != nil { + indefType := enc.indefTypes[len(enc.indefTypes)-1] + if indefType == cborTypeTextString { + k := reflect.TypeOf(v).Kind() + if k != reflect.String { + return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string") + } + } else if indefType == cborTypeByteString { + t := reflect.TypeOf(v) + k := t.Kind() + if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 { + return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length byte string") + } + } + } + + buf := getEncodeBuffer() + + err := encode(buf, enc.em, reflect.ValueOf(v)) + if err == nil { + _, err = enc.w.Write(buf.Bytes()) + } + + putEncodeBuffer(buf) + return err +} + +// StartIndefiniteByteString starts byte string encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes definite length byte strings +// ("chunks") as one contiguous string until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteByteString() error { + return enc.startIndefinite(cborTypeByteString) +} + +// StartIndefiniteTextString starts text string encoding of indefinite length. 
+// Subsequent calls of (*Encoder).Encode() encodes definite length text strings +// ("chunks") as one contiguous string until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteTextString() error { + return enc.startIndefinite(cborTypeTextString) +} + +// StartIndefiniteArray starts array encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes elements of the array +// until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteArray() error { + return enc.startIndefinite(cborTypeArray) +} + +// StartIndefiniteMap starts map encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes elements of the map +// until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteMap() error { + return enc.startIndefinite(cborTypeMap) +} + +// EndIndefinite closes last opened indefinite length value. +func (enc *Encoder) EndIndefinite() error { + if len(enc.indefTypes) == 0 { + return errors.New("cbor: cannot encode \"break\" code outside indefinite length values") + } + _, err := enc.w.Write([]byte{cborBreakFlag}) + if err == nil { + enc.indefTypes = enc.indefTypes[:len(enc.indefTypes)-1] + } + return err +} + +var cborIndefHeader = map[cborType][]byte{ + cborTypeByteString: {cborByteStringWithIndefiniteLengthHead}, + cborTypeTextString: {cborTextStringWithIndefiniteLengthHead}, + cborTypeArray: {cborArrayWithIndefiniteLengthHead}, + cborTypeMap: {cborMapWithIndefiniteLengthHead}, +} + +func (enc *Encoder) startIndefinite(typ cborType) error { + if enc.em.indefLength == IndefLengthForbidden { + return &IndefiniteLengthError{typ} + } + _, err := enc.w.Write(cborIndefHeader[typ]) + if err == nil { + enc.indefTypes = append(enc.indefTypes, typ) + } + return err +} + +// RawMessage is a raw encoded CBOR value. +type RawMessage []byte + +// MarshalCBOR returns m or CBOR nil if m is nil. +func (m RawMessage) MarshalCBOR() ([]byte, error) { + if len(m) == 0 { + return cborNil, nil + } + return m, nil +} + +// UnmarshalCBOR creates a copy of data and saves to *m. +func (m *RawMessage) UnmarshalCBOR(data []byte) error { + if m == nil { + return errors.New("cbor.RawMessage: UnmarshalCBOR on nil pointer") + } + *m = append((*m)[0:0], data...) + return nil +} diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/structfields.go b/go-controller/vendor/github.com/fxamacker/cbor/v2/structfields.go new file mode 100644 index 0000000000..81228acf0f --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/structfields.go @@ -0,0 +1,260 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "reflect" + "sort" + "strings" +) + +type field struct { + name string + nameAsInt int64 // used by decoder to match field name with CBOR int + cborName []byte + cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3 + idx []int + typ reflect.Type + ef encodeFunc + ief isEmptyFunc + typInfo *typeInfo // used by decoder to reuse type info + tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields) + omitEmpty bool // used to skip empty field + keyAsInt bool // used to encode/decode field name as int +} + +type fields []*field + +// indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth.
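The Start*/EndIndefinite helpers above act as open/close brackets: Start* writes the indefinite-length head for the chosen major type and pushes it on enc.indefTypes, each subsequent Encode call writes one chunk or element, and EndIndefinite writes the break code and pops the stack. A usage sketch, assuming the default EncOptions (which permit indefinite lengths):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	var buf bytes.Buffer
	enc := cbor.NewEncoder(&buf)

	// 9f ... ff: an indefinite-length array with two elements.
	_ = enc.StartIndefiniteArray()
	_ = enc.Encode(1)
	_ = enc.Encode("a")
	_ = enc.EndIndefinite()

	fmt.Printf("% x\n", buf.Bytes()) // 9f 01 61 61 ff
}
```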
+type indexFieldSorter struct { + fields fields +} + +func (x *indexFieldSorter) Len() int { + return len(x.fields) +} + +func (x *indexFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *indexFieldSorter) Less(i, j int) bool { + iIdx, jIdx := x.fields[i].idx, x.fields[j].idx + for k := 0; k < len(iIdx) && k < len(jIdx); k++ { + if iIdx[k] != jIdx[k] { + return iIdx[k] < jIdx[k] + } + } + return len(iIdx) <= len(jIdx) +} + +// nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag. +type nameLevelAndTagFieldSorter struct { + fields fields +} + +func (x *nameLevelAndTagFieldSorter) Len() int { + return len(x.fields) +} + +func (x *nameLevelAndTagFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool { + fi, fj := x.fields[i], x.fields[j] + if fi.name != fj.name { + return fi.name < fj.name + } + if len(fi.idx) != len(fj.idx) { + return len(fi.idx) < len(fj.idx) + } + if fi.tagged != fj.tagged { + return fi.tagged + } + return i < j // Field i and j have the same name, depth, and tagged status. Nothing else matters. +} + +// getFields returns visible fields of struct type t following visibility rules for JSON encoding. +func getFields(t reflect.Type) (flds fields, structOptions string) { + // Get special field "_" tag options + if f, ok := t.FieldByName("_"); ok { + tag := f.Tag.Get("cbor") + if tag != "-" { + structOptions = tag + } + } + + // nTypes contains next level anonymous fields' types and indexes + // (there can be multiple fields of the same type at the same level) + flds, nTypes := appendFields(t, nil, nil, nil) + + if len(nTypes) > 0 { + + var cTypes map[reflect.Type][][]int // current level anonymous fields' types and indexes + vTypes := map[reflect.Type]bool{t: true} // visited field types at less nested levels + + for len(nTypes) > 0 { + cTypes, nTypes = nTypes, nil + + for t, idx := range cTypes { + // If there are multiple anonymous fields of the same struct type at the same level, all are ignored. + if len(idx) > 1 { + continue + } + + // Anonymous field of the same type at deeper nested level is ignored. + if vTypes[t] { + continue + } + vTypes[t] = true + + flds, nTypes = appendFields(t, idx[0], flds, nTypes) + } + } + } + + sort.Sort(&nameLevelAndTagFieldSorter{flds}) + + // Keep visible fields. + j := 0 // index of next unique field + for i := 0; i < len(flds); { + name := flds[i].name + if i == len(flds)-1 || // last field + name != flds[i+1].name || // field i has unique field name + len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1 + (flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not + flds[j] = flds[i] + j++ + } + + // Skip fields with the same field name. + for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive + } + } + if j != len(flds) { + flds = flds[:j] + } + + // Sort fields by field index + sort.Sort(&indexFieldSorter{flds}) + + return flds, structOptions +} + +// appendFields appends type t's exportable fields to flds and anonymous struct fields to nTypes . 
+func appendFields( + t reflect.Type, + idx []int, + flds fields, + nTypes map[reflect.Type][][]int, +) ( + _flds fields, + _nTypes map[reflect.Type][][]int, +) { + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + + if !isFieldExportable(f, ft.Kind()) { + continue + } + + tag := f.Tag.Get("cbor") + if tag == "" { + tag = f.Tag.Get("json") + } + if tag == "-" { + continue + } + + tagged := tag != "" + + // Parse field tag options + var tagFieldName string + var omitempty, keyasint bool + for j := 0; tag != ""; j++ { + var token string + idx := strings.IndexByte(tag, ',') + if idx == -1 { + token, tag = tag, "" + } else { + token, tag = tag[:idx], tag[idx+1:] + } + if j == 0 { + tagFieldName = token + } else { + switch token { + case "omitempty": + omitempty = true + case "keyasint": + keyasint = true + } + } + } + + fieldName := tagFieldName + if tagFieldName == "" { + fieldName = f.Name + } + + fIdx := make([]int, len(idx)+1) + copy(fIdx, idx) + fIdx[len(fIdx)-1] = i + + if !f.Anonymous || ft.Kind() != reflect.Struct || tagFieldName != "" { + flds = append(flds, &field{ + name: fieldName, + idx: fIdx, + typ: f.Type, + omitEmpty: omitempty, + keyAsInt: keyasint, + tagged: tagged}) + } else { + if nTypes == nil { + nTypes = make(map[reflect.Type][][]int) + } + nTypes[ft] = append(nTypes[ft], fIdx) + } + } + + return flds, nTypes +} + +// isFieldExportable returns true if f is an exportable (regular or anonymous) field or +// a nonexportable anonymous field of struct type. +// Nonexportable anonymous field of struct type can contain exportable fields. +func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam + exportable := f.PkgPath == "" + return exportable || (f.Anonymous && fk == reflect.Struct) +} + +type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error) + +// getFieldValue returns field value of struct v by index. When encountering null pointer +// to anonymous (embedded) struct field, f is called with the last traversed field value. +func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv reflect.Value, err error) { + fv = v + for i, n := range idx { + fv = fv.Field(n) + + if i < len(idx)-1 { + if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct { + if fv.IsNil() { + // Null pointer to embedded struct field + fv, err = f(fv) + if err != nil || !fv.IsValid() { + return fv, err + } + } + fv = fv.Elem() + } + } + } + return fv, nil +} diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/tag.go b/go-controller/vendor/github.com/fxamacker/cbor/v2/tag.go new file mode 100644 index 0000000000..5c4d2b7a42 --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/tag.go @@ -0,0 +1,299 @@ +package cbor + +import ( + "errors" + "fmt" + "reflect" + "sync" +) + +// Tag represents CBOR tag data, including tag number and unmarshaled tag content. Marshaling and +// unmarshaling of tag content is subject to any encode and decode options that would apply to +// enclosed data item if it were to appear outside of a tag. +type Tag struct { + Number uint64 + Content interface{} +} + +// RawTag represents CBOR tag data, including tag number and raw tag content. +// RawTag implements Unmarshaler and Marshaler interfaces. +type RawTag struct { + Number uint64 + Content RawMessage +} + +// UnmarshalCBOR sets *t with tag number and raw tag content copied from data. 
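getFields above mirrors encoding/json's visibility rules: a `cbor` tag wins over a `json` tag, `-` skips the field, and the options parsed here (`omitempty`, `keyasint`, plus `toarray` on the special `_` field) change the wire format. A sketch of the tag forms this code recognizes (expected bytes are hand-computed per RFC 8949):

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type record struct {
	_    struct{} `cbor:",toarray"` // encode this struct as a CBOR array
	ID   uint64
	Name string
}

type claims struct {
	Issuer string `cbor:"1,keyasint"`    // map key is the integer 1, not a string
	Extra  string `cbor:"ext,omitempty"` // dropped when empty
	Skip   string `cbor:"-"`             // never encoded
}

func main() {
	a, _ := cbor.Marshal(record{ID: 7, Name: "x"})
	fmt.Printf("% x\n", a) // 82 07 61 78 (2-element array)

	m, _ := cbor.Marshal(claims{Issuer: "me"})
	fmt.Printf("% x\n", m) // a1 01 62 6d 65 (1-entry map)
}
```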
+func (t *RawTag) UnmarshalCBOR(data []byte) error { + if t == nil { + return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer") + } + + // Decoding CBOR null and undefined to cbor.RawTag is no-op. + if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) { + return nil + } + + d := decoder{data: data, dm: defaultDecMode} + + // Unmarshal tag number. + typ, _, num := d.getHead() + if typ != cborTypeTag { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeRawTag.String()} + } + t.Number = num + + // Unmarshal tag content. + c := d.data[d.off:] + t.Content = make([]byte, len(c)) + copy(t.Content, c) + return nil +} + +// MarshalCBOR returns CBOR encoding of t. +func (t RawTag) MarshalCBOR() ([]byte, error) { + if t.Number == 0 && len(t.Content) == 0 { + // Marshal uninitialized cbor.RawTag + b := make([]byte, len(cborNil)) + copy(b, cborNil) + return b, nil + } + + e := getEncodeBuffer() + + encodeHead(e, byte(cborTypeTag), t.Number) + + content := t.Content + if len(content) == 0 { + content = cborNil + } + + buf := make([]byte, len(e.Bytes())+len(content)) + n := copy(buf, e.Bytes()) + copy(buf[n:], content) + + putEncodeBuffer(e) + return buf, nil +} + +// DecTagMode specifies how decoder handles tag number. +type DecTagMode int + +const ( + // DecTagIgnored makes decoder ignore tag number (skips if present). + DecTagIgnored DecTagMode = iota + + // DecTagOptional makes decoder verify tag number if it's present. + DecTagOptional + + // DecTagRequired makes decoder verify tag number and tag number must be present. + DecTagRequired + + maxDecTagMode +) + +func (dtm DecTagMode) valid() bool { + return dtm >= 0 && dtm < maxDecTagMode +} + +// EncTagMode specifies how encoder handles tag number. +type EncTagMode int + +const ( + // EncTagNone makes encoder not encode tag number. + EncTagNone EncTagMode = iota + + // EncTagRequired makes encoder encode tag number. + EncTagRequired + + maxEncTagMode +) + +func (etm EncTagMode) valid() bool { + return etm >= 0 && etm < maxEncTagMode +} + +// TagOptions specifies how encoder and decoder handle tag number. +type TagOptions struct { + DecTag DecTagMode + EncTag EncTagMode +} + +// TagSet is an interface to add and remove tag info. It is used by EncMode and DecMode +// to provide CBOR tag support. +type TagSet interface { + // Add adds given tag number(s), content type, and tag options to TagSet. + Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error + + // Remove removes given tag content type from TagSet. 
+ Remove(contentType reflect.Type) + + tagProvider +} + +type tagProvider interface { + getTagItemFromType(t reflect.Type) *tagItem + getTypeFromTagNum(num []uint64) reflect.Type +} + +type tagItem struct { + num []uint64 + cborTagNum []byte + contentType reflect.Type + opts TagOptions +} + +func (t *tagItem) equalTagNum(num []uint64) bool { + // Fast path to compare 1 tag number + if len(t.num) == 1 && len(num) == 1 && t.num[0] == num[0] { + return true + } + + if len(t.num) != len(num) { + return false + } + + for i := 0; i < len(t.num); i++ { + if t.num[i] != num[i] { + return false + } + } + + return true +} + +type ( + tagSet map[reflect.Type]*tagItem + + syncTagSet struct { + sync.RWMutex + t tagSet + } +) + +func (t tagSet) getTagItemFromType(typ reflect.Type) *tagItem { + return t[typ] +} + +func (t tagSet) getTypeFromTagNum(num []uint64) reflect.Type { + for typ, tag := range t { + if tag.equalTagNum(num) { + return typ + } + } + return nil +} + +// NewTagSet returns TagSet (safe for concurrency). +func NewTagSet() TagSet { + return &syncTagSet{t: make(map[reflect.Type]*tagItem)} +} + +// Add adds given tag number(s), content type, and tag options to TagSet. +func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error { + if contentType == nil { + return errors.New("cbor: cannot add nil content type to TagSet") + } + for contentType.Kind() == reflect.Ptr { + contentType = contentType.Elem() + } + tag, err := newTagItem(opts, contentType, num, nestedNum...) + if err != nil { + return err + } + t.Lock() + defer t.Unlock() + for typ, ti := range t.t { + if typ == contentType { + return errors.New("cbor: content type " + contentType.String() + " already exists in TagSet") + } + if ti.equalTagNum(tag.num) { + return fmt.Errorf("cbor: tag number %v already exists in TagSet", tag.num) + } + } + t.t[contentType] = tag + return nil +} + +// Remove removes given tag content type from TagSet. 
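In typical use a TagSet is built once and attached to an encoding or decoding mode, after which the registered tag number is emitted and verified automatically for the named type. A sketch, assuming the package's EncModeWithTags constructor (not part of this patch) and an arbitrary tag number from the unassigned range:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/fxamacker/cbor/v2"
)

type signature []byte

func main() {
	tags := cbor.NewTagSet()
	// 998 is an arbitrary example tag number; 0..3 and 55799 are rejected above.
	if err := tags.Add(
		cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired},
		reflect.TypeOf(signature(nil)),
		998,
	); err != nil {
		panic(err)
	}

	em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
	b, _ := em.Marshal(signature{0x01})
	fmt.Printf("% x\n", b) // d9 03 e6 41 01 (tag 998, then a 1-byte string)
}
```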
+func (t *syncTagSet) Remove(contentType reflect.Type) { + for contentType.Kind() == reflect.Ptr { + contentType = contentType.Elem() + } + t.Lock() + delete(t.t, contentType) + t.Unlock() +} + +func (t *syncTagSet) getTagItemFromType(typ reflect.Type) *tagItem { + t.RLock() + ti := t.t[typ] + t.RUnlock() + return ti +} + +func (t *syncTagSet) getTypeFromTagNum(num []uint64) reflect.Type { + t.RLock() + rt := t.t.getTypeFromTagNum(num) + t.RUnlock() + return rt +} + +func newTagItem(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) (*tagItem, error) { + if opts.DecTag == DecTagIgnored && opts.EncTag == EncTagNone { + return nil, errors.New("cbor: cannot add tag with DecTagIgnored and EncTagNone options to TagSet") + } + if contentType.PkgPath() == "" || contentType.Kind() == reflect.Interface { + return nil, errors.New("cbor: can only add named types to TagSet, got " + contentType.String()) + } + if contentType == typeTime { + return nil, errors.New("cbor: cannot add time.Time to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead") + } + if contentType == typeBigInt { + return nil, errors.New("cbor: cannot add big.Int to TagSet, it's built-in and supported automatically") + } + if contentType == typeTag { + return nil, errors.New("cbor: cannot add cbor.Tag to TagSet") + } + if contentType == typeRawTag { + return nil, errors.New("cbor: cannot add cbor.RawTag to TagSet") + } + if num == 0 || num == 1 { + return nil, errors.New("cbor: cannot add tag number 0 or 1 to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead") + } + if num == 2 || num == 3 { + return nil, errors.New("cbor: cannot add tag number 2 or 3 to TagSet, it's built-in and supported automatically") + } + if num == tagNumSelfDescribedCBOR { + return nil, errors.New("cbor: cannot add tag number 55799 to TagSet, it's built-in and ignored automatically") + } + + te := tagItem{num: []uint64{num}, opts: opts, contentType: contentType} + te.num = append(te.num, nestedNum...) + + // Cache encoded tag numbers + e := getEncodeBuffer() + for _, n := range te.num { + encodeHead(e, byte(cborTypeTag), n) + } + te.cborTagNum = make([]byte, e.Len()) + copy(te.cborTagNum, e.Bytes()) + putEncodeBuffer(e) + + return &te, nil +} + +var ( + typeTag = reflect.TypeOf(Tag{}) + typeRawTag = reflect.TypeOf(RawTag{}) +) + +// WrongTagError describes mismatch between CBOR tag and registered tag. +type WrongTagError struct { + RegisteredType reflect.Type + RegisteredTagNum []uint64 + TagNum []uint64 +} + +func (e *WrongTagError) Error() string { + return fmt.Sprintf("cbor: wrong tag number for %s, got %v, expected %v", e.RegisteredType.String(), e.TagNum, e.RegisteredTagNum) +} diff --git a/go-controller/vendor/github.com/fxamacker/cbor/v2/valid.go b/go-controller/vendor/github.com/fxamacker/cbor/v2/valid.go new file mode 100644 index 0000000000..b40793b95e --- /dev/null +++ b/go-controller/vendor/github.com/fxamacker/cbor/v2/valid.go @@ -0,0 +1,394 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "encoding/binary" + "errors" + "io" + "math" + "strconv" + + "github.com/x448/float16" +) + +// SyntaxError is a description of a CBOR syntax error. +type SyntaxError struct { + msg string +} + +func (e *SyntaxError) Error() string { return e.msg } + +// SemanticError is a description of a CBOR semantic error. 
+type SemanticError struct { + msg string +} + +func (e *SemanticError) Error() string { return e.msg } + +// MaxNestedLevelError indicates exceeded max nested level of any combination of CBOR arrays/maps/tags. +type MaxNestedLevelError struct { + maxNestedLevels int +} + +func (e *MaxNestedLevelError) Error() string { + return "cbor: exceeded max nested level " + strconv.Itoa(e.maxNestedLevels) +} + +// MaxArrayElementsError indicates exceeded max number of elements for CBOR arrays. +type MaxArrayElementsError struct { + maxArrayElements int +} + +func (e *MaxArrayElementsError) Error() string { + return "cbor: exceeded max number of elements " + strconv.Itoa(e.maxArrayElements) + " for CBOR array" +} + +// MaxMapPairsError indicates exceeded max number of key-value pairs for CBOR maps. +type MaxMapPairsError struct { + maxMapPairs int +} + +func (e *MaxMapPairsError) Error() string { + return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map" +} + +// IndefiniteLengthError indicates found disallowed indefinite length items. +type IndefiniteLengthError struct { + t cborType +} + +func (e *IndefiniteLengthError) Error() string { + return "cbor: indefinite-length " + e.t.String() + " isn't allowed" +} + +// TagsMdError indicates found disallowed CBOR tags. +type TagsMdError struct { +} + +func (e *TagsMdError) Error() string { + return "cbor: CBOR tag isn't allowed" +} + +// ExtraneousDataError indicates found extraneous data following well-formed CBOR data item. +type ExtraneousDataError struct { + numOfBytes int // number of bytes of extraneous data + index int // location of extraneous data +} + +func (e *ExtraneousDataError) Error() string { + return "cbor: " + strconv.Itoa(e.numOfBytes) + " bytes of extraneous data starting at index " + strconv.Itoa(e.index) +} + +// wellformed checks whether the CBOR data item is well-formed. +// allowExtraData indicates if extraneous data is allowed after the CBOR data item. +// - use allowExtraData = true when using Decoder.Decode() +// - use allowExtraData = false when using Unmarshal() +func (d *decoder) wellformed(allowExtraData bool, checkBuiltinTags bool) error { + if len(d.data) == d.off { + return io.EOF + } + _, err := d.wellformedInternal(0, checkBuiltinTags) + if err == nil { + if !allowExtraData && d.off != len(d.data) { + err = &ExtraneousDataError{len(d.data) - d.off, d.off} + } + } + return err +} + +// wellformedInternal checks data's well-formedness and returns max depth and error. 
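The Max* errors above are driven by caller-configurable limits. A sketch of setting them; the DecOptions field names (MaxNestedLevels, MaxArrayElements, MaxMapPairs) are assumed from this package's options API, which this patch only references through d.dm:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	dm, err := cbor.DecOptions{
		MaxNestedLevels:  16,   // reject deeper nesting of arrays/maps/tags
		MaxArrayElements: 1024, // cap array length
		MaxMapPairs:      1024, // cap map size
	}.DecMode()
	if err != nil {
		panic(err)
	}

	var v interface{}
	// 0x81 0x01 is [1]: depth 1, well within the limits above.
	err = dm.Unmarshal([]byte{0x81, 0x01}, &v)
	fmt.Println(v, err) // [1] <nil>
}
```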
+func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, error) { //nolint:gocyclo + t, _, val, indefiniteLength, err := d.wellformedHeadWithIndefiniteLengthFlag() + if err != nil { + return 0, err + } + + switch t { + case cborTypeByteString, cborTypeTextString: + if indefiniteLength { + if d.dm.indefLength == IndefLengthForbidden { + return 0, &IndefiniteLengthError{t} + } + return d.wellformedIndefiniteString(t, depth, checkBuiltinTags) + } + valInt := int(val) + if valInt < 0 { + // Detect integer overflow + return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow") + } + if len(d.data)-d.off < valInt { // valInt+off may overflow integer + return 0, io.ErrUnexpectedEOF + } + d.off += valInt + + case cborTypeArray, cborTypeMap: + depth++ + if depth > d.dm.maxNestedLevels { + return 0, &MaxNestedLevelError{d.dm.maxNestedLevels} + } + + if indefiniteLength { + if d.dm.indefLength == IndefLengthForbidden { + return 0, &IndefiniteLengthError{t} + } + return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags) + } + + valInt := int(val) + if valInt < 0 { + // Detect integer overflow + return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow") + } + + if t == cborTypeArray { + if valInt > d.dm.maxArrayElements { + return 0, &MaxArrayElementsError{d.dm.maxArrayElements} + } + } else { + if valInt > d.dm.maxMapPairs { + return 0, &MaxMapPairsError{d.dm.maxMapPairs} + } + } + + count := 1 + if t == cborTypeMap { + count = 2 + } + maxDepth := depth + for j := 0; j < count; j++ { + for i := 0; i < valInt; i++ { + var dpt int + if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + if dpt > maxDepth { + maxDepth = dpt // Save max depth + } + } + } + depth = maxDepth + + case cborTypeTag: + if d.dm.tagsMd == TagsForbidden { + return 0, &TagsMdError{} + } + + tagNum := val + + // Scan nested tag numbers to avoid recursion. + for { + if len(d.data) == d.off { // Tag number must be followed by tag content. + return 0, io.ErrUnexpectedEOF + } + if checkBuiltinTags { + err = validBuiltinTag(tagNum, d.data[d.off]) + if err != nil { + return 0, err + } + } + if d.dm.bignumTag == BignumTagForbidden && (tagNum == 2 || tagNum == 3) { + return 0, &UnacceptableDataItemError{ + CBORType: cborTypeTag.String(), + Message: "bignum", + } + } + if getType(d.data[d.off]) != cborTypeTag { + break + } + if _, _, tagNum, err = d.wellformedHead(); err != nil { + return 0, err + } + depth++ + if depth > d.dm.maxNestedLevels { + return 0, &MaxNestedLevelError{d.dm.maxNestedLevels} + } + } + // Check tag content. + return d.wellformedInternal(depth, checkBuiltinTags) + } + + return depth, nil +} + +// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error. +func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) { + var err error + for { + if len(d.data) == d.off { + return 0, io.ErrUnexpectedEOF + } + if isBreakFlag(d.data[d.off]) { + d.off++ + break + } + // Peek ahead to get next type and indefinite length status. 
+ nt, ai := parseInitialByte(d.data[d.off]) + if t != nt { + return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()} + } + if additionalInformation(ai).isIndefiniteLength() { + return 0, &SyntaxError{"cbor: indefinite-length " + t.String() + " chunk is not definite-length"} + } + if depth, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + } + return depth, nil +} + +// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error. +func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) { + var err error + maxDepth := depth + i := 0 + for { + if len(d.data) == d.off { + return 0, io.ErrUnexpectedEOF + } + if isBreakFlag(d.data[d.off]) { + d.off++ + break + } + var dpt int + if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + if dpt > maxDepth { + maxDepth = dpt + } + i++ + if t == cborTypeArray { + if i > d.dm.maxArrayElements { + return 0, &MaxArrayElementsError{d.dm.maxArrayElements} + } + } else { + if i%2 == 0 && i/2 > d.dm.maxMapPairs { + return 0, &MaxMapPairsError{d.dm.maxMapPairs} + } + } + } + if t == cborTypeMap && i%2 == 1 { + return 0, &SyntaxError{"cbor: unexpected \"break\" code"} + } + return maxDepth, nil +} + +func (d *decoder) wellformedHeadWithIndefiniteLengthFlag() ( + t cborType, + ai byte, + val uint64, + indefiniteLength bool, + err error, +) { + t, ai, val, err = d.wellformedHead() + if err != nil { + return + } + indefiniteLength = additionalInformation(ai).isIndefiniteLength() + return +} + +func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) { + dataLen := len(d.data) - d.off + if dataLen == 0 { + return 0, 0, 0, io.ErrUnexpectedEOF + } + + t, ai = parseInitialByte(d.data[d.off]) + val = uint64(ai) + d.off++ + dataLen-- + + if ai <= maxAdditionalInformationWithoutArgument { + return t, ai, val, nil + } + + if ai == additionalInformationWith1ByteArgument { + const argumentSize = 1 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(d.data[d.off]) + d.off++ + if t == cborTypePrimitives && val < 32 { + return 0, 0, 0, &SyntaxError{"cbor: invalid simple value " + strconv.Itoa(int(val)) + " for type " + t.String()} + } + return t, ai, val, nil + } + + if ai == additionalInformationWith2ByteArgument { + const argumentSize = 2 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + if t == cborTypePrimitives { + if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if ai == additionalInformationWith4ByteArgument { + const argumentSize = 4 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + if t == cborTypePrimitives { + if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if ai == additionalInformationWith8ByteArgument { + const argumentSize = 8 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize]) + d.off += argumentSize + if t == cborTypePrimitives { + if err 
:= d.acceptableFloat(math.Float64frombits(val)); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if additionalInformation(ai).isIndefiniteLength() { + switch t { + case cborTypePositiveInt, cborTypeNegativeInt, cborTypeTag: + return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()} + case cborTypePrimitives: // 0xff (break code) should not be outside wellformedIndefinite(). + return 0, 0, 0, &SyntaxError{"cbor: unexpected \"break\" code"} + } + return t, ai, val, nil + } + + // ai == 28, 29, 30 + return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()} +} + +func (d *decoder) acceptableFloat(f float64) error { + switch { + case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f): + return &UnacceptableDataItemError{ + CBORType: cborTypePrimitives.String(), + Message: "floating-point NaN", + } + case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0): + return &UnacceptableDataItemError{ + CBORType: cborTypePrimitives.String(), + Message: "floating-point infinity", + } + } + return nil +} diff --git a/go-controller/vendor/github.com/go-logr/logr/README.md b/go-controller/vendor/github.com/go-logr/logr/README.md index 8969526a6e..7c7f0c69cd 100644 --- a/go-controller/vendor/github.com/go-logr/logr/README.md +++ b/go-controller/vendor/github.com/go-logr/logr/README.md @@ -1,6 +1,7 @@ # A minimal logging API for Go [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) logr offers an(other) opinion on how Go programs and libraries can do logging diff --git a/go-controller/vendor/github.com/go-logr/logr/funcr/funcr.go b/go-controller/vendor/github.com/go-logr/logr/funcr/funcr.go index fb2f866f4b..30568e768d 100644 --- a/go-controller/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/go-controller/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -236,15 +236,14 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { // implementation. It should be constructed with NewFormatter. Some of // its methods directly implement logr.LogSink. type Formatter struct { - outputFormat outputFormat - prefix string - values []any - valuesStr string - parentValuesStr string - depth int - opts *Options - group string // for slog groups - groupDepth int + outputFormat outputFormat + prefix string + values []any + valuesStr string + depth int + opts *Options + groupName string // for slog groups + groups []groupDef } // outputFormat indicates which outputFormat to use. @@ -257,6 +256,13 @@ const ( outputJSON ) +// groupDef represents a saved group. The values may be empty, but we don't +// know if we need to render the group until the final record is rendered. +type groupDef struct { + name string + values string +} + // PseudoStruct is a list of key-value pairs that gets logged as a struct. type PseudoStruct []any @@ -264,76 +270,102 @@ type PseudoStruct []any func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. 
buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { - buf.WriteByte('{') // for the whole line + buf.WriteByte('{') // for the whole record } + // Render builtins vals := builtins if hook := f.opts.RenderBuiltinsHook; hook != nil { vals = hook(f.sanitize(vals)) } - f.flatten(buf, vals, false, false) // keys are ours, no need to escape + f.flatten(buf, vals, false) // keys are ours, no need to escape continuing := len(builtins) > 0 - if f.parentValuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) + // Turn the inner-most group into a string + argsStr := func() string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) } - buf.WriteString(f.parentValuesStr) - continuing = true - } + f.flatten(buf, vals, true) // escape user-provided keys - groupDepth := f.groupDepth - if f.group != "" { - if f.valuesStr != "" || len(args) != 0 { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } else { - // The group was empty - groupDepth-- + return buf.String() + }() + + // Render the stack of groups from the inside out. + bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr) + for i := len(f.groups) - 1; i >= 0; i-- { + grp := &f.groups[i] + if grp.values == "" && bodyStr == "" { + // no contents, so we must elide the whole group + continue } + bodyStr = f.renderGroup(grp.name, grp.values, bodyStr) } - if f.valuesStr != "" { + if bodyStr != "" { if continuing { buf.WriteByte(f.comma()) } - buf.WriteString(f.valuesStr) - continuing = true + buf.WriteString(bodyStr) } - vals = args - if hook := f.opts.RenderArgsHook; hook != nil { - vals = hook(f.sanitize(vals)) + if f.outputFormat == outputJSON { + buf.WriteByte('}') // for the whole record } - f.flatten(buf, vals, continuing, true) // escape user-provided keys - for i := 0; i < groupDepth; i++ { - buf.WriteByte('}') // for the groups + return buf.String() +} + +// renderGroup returns a string representation of the named group with rendered +// values and args. If the name is empty, this will return the values and args, +// joined. If the name is not empty, this will return a single key-value pair, +// where the value is a grouping of the values and args. If the values and +// args are both empty, this will return an empty string, even if the name was +// specified. +func (f Formatter) renderGroup(name string, values string, args string) string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + needClosingBrace := false + if name != "" && (values != "" || args != "") { + buf.WriteString(f.quoted(name, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') + needClosingBrace = true } - if f.outputFormat == outputJSON { - buf.WriteByte('}') // for the whole line + continuing := false + if values != "" { + buf.WriteString(values) + continuing = true + } + + if args != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(args) + } + + if needClosingBrace { + buf.WriteByte('}') } return buf.String() } -// flatten renders a list of key-value pairs into a buffer. If continuing is -// true, it assumes that the buffer has previous values and will emit a -// separator (which depends on the output format) before the first pair it -// writes. 
If escapeKeys is true, the keys are assumed to have -// non-JSON-compatible characters in them and must be evaluated for escapes. +// flatten renders a list of key-value pairs into a buffer. If escapeKeys is +// true, the keys are assumed to have non-JSON-compatible characters in them +// and must be evaluated for escapes. // // This function returns a potentially modified version of kvList, which // ensures that there is a value for every key (adding a value if needed) and // that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any { +func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any { // This logic overlaps with sanitize() but saves one type-cast per key, // which can be measurable. if len(kvList)%2 != 0 { @@ -354,7 +386,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc } v := kvList[i+1] - if i > 0 || continuing { + if i > 0 { if f.outputFormat == outputJSON { buf.WriteByte(f.comma()) } else { @@ -766,46 +798,17 @@ func (f Formatter) sanitize(kvList []any) []any { // startGroup opens a new group scope (basically a sub-struct), which locks all // the current saved values and starts them anew. This is needed to satisfy // slog. -func (f *Formatter) startGroup(group string) { +func (f *Formatter) startGroup(name string) { // Unnamed groups are just inlined. - if group == "" { + if name == "" { return } - // Any saved values can no longer be changed. - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - continuing := false - - if f.parentValuesStr != "" { - buf.WriteString(f.parentValuesStr) - continuing = true - } - - if f.group != "" && f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } - - if f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.valuesStr) - } - - // NOTE: We don't close the scope here - that's done later, when a log line - // is actually rendered (because we have N scopes to close). - - f.parentValuesStr = buf.String() + n := len(f.groups) + f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr}) // Start collecting new values. - f.group = group - f.groupDepth++ + f.groupName = name f.valuesStr = "" f.values = nil } @@ -900,7 +903,7 @@ func (f *Formatter) AddValues(kvList []any) { // Pre-render values, so we don't have to do it on each Info/Error call. buf := bytes.NewBuffer(make([]byte, 0, 1024)) - f.flatten(buf, vals, false, true) // escape user-provided keys + f.flatten(buf, vals, true) // escape user-provided keys f.valuesStr = buf.String() } diff --git a/go-controller/vendor/github.com/go-openapi/swag/util.go b/go-controller/vendor/github.com/go-openapi/swag/util.go index f78ab684a0..d971fbe34b 100644 --- a/go-controller/vendor/github.com/go-openapi/swag/util.go +++ b/go-controller/vendor/github.com/go-openapi/swag/util.go @@ -341,12 +341,21 @@ type zeroable interface { // IsZero returns true when the value passed into the function is a zero value. // This allows for safer checking of interface values. 
func IsZero(data interface{}) bool { + v := reflect.ValueOf(data) + // check for nil data + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return true + } + } + // check for things that have an IsZero method instead if vv, ok := data.(zeroable); ok { return vv.IsZero() } + // continue with slightly more complex reflection - v := reflect.ValueOf(data) switch v.Kind() { case reflect.String: return v.Len() == 0 @@ -358,14 +367,13 @@ func IsZero(data interface{}) bool { return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() case reflect.Struct, reflect.Array: return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) case reflect.Invalid: return true + default: + return false } - return false } // AddInitialisms add additional initialisms diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/.editorconfig b/go-controller/vendor/github.com/go-task/slim-sprig/v3/.editorconfig new file mode 100644 index 0000000000..b0c95367e7 --- /dev/null +++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/.editorconfig @@ -0,0 +1,14 @@ +# editorconfig.org + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = tab +indent_size = 8 + +[*.{md,yml,yaml,json}] +indent_style = space +indent_size = 2 diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/.gitattributes b/go-controller/vendor/github.com/go-task/slim-sprig/v3/.gitattributes new file mode 100644 index 0000000000..176a458f94 --- /dev/null +++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/.gitattributes @@ -0,0 +1 @@ +* text=auto diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/.gitignore b/go-controller/vendor/github.com/go-task/slim-sprig/v3/.gitignore new file mode 100644 index 0000000000..5e3002f88f --- /dev/null +++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/.gitignore @@ -0,0 +1,2 @@ +vendor/ +/.glide diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md b/go-controller/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md new file mode 100644 index 0000000000..2ce45dd4ec --- /dev/null +++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md @@ -0,0 +1,383 @@ +# Changelog + +## Release 3.2.3 (2022-11-29) + +### Changed + +- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) +- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) +- #353: Updated masterminds/semver which included bug fixes +- #354: Updated golang.org/x/crypto which included bug fixes + +## Release 3.2.2 (2021-02-04) + +This is a re-release of 3.2.1 to satisfy something with the Go module system. + +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. 
see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function (thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. + +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. + +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. That issue is tracked at + https://github.com/imdario/mergo/issues/139 + +### Fixed + +- #229: Fix `seq` example in docs (thanks @kalmant) + +## Release 3.0.2 (2019-12-13) + +### Fixed + +- #220: Updating to semver v3.0.3 to fix issue with <= ranges +- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) + +## Release 3.0.1 (2019-12-08) + +### Fixed + +- #212: Updated semver fixing broken constraint checking with ^0.0 + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy +- #196: trunc now supports negative values (thanks @Dean-Coakley) + +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list concat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + +## Release 2.20.0 (2019-06-18) + +### Added + +- #164: Adding function to get unix epoch for a time (@mattfarina) +- #166: Adding tests for date_in_zone (@mattfarina) + +### Changed + +- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) +- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) +- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) + +### Fixed + +## Release 2.19.0 (2019-03-02) + +IMPORTANT: This release reverts a change from 2.18.0 + +In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. + +We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. + +### Changed + +- Fix substr panic 35fb796 (Alexey igrychev) +- Remove extra period 1eb7729 (Matthew Lorimor) +- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) +- README edits/fixes/suggestions 08fe136 (Lauri Apple) + + +## Release 2.18.0 (2019-02-12) + +### Added + +- Added mergeOverwrite function +- cryptographic functions that use secure random (see fe1de12) + +### Changed + +- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) +- Handle has for nil list 9c10885 (Daniel Cohen) +- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) +- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) +- Replace outdated goutils imports 01893d2 (Matthew Lorimor) +- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) +- Handle untyped nil values as parameters to string functions 2b2ec8f (Morten Torkildsen) + +### Fixed + +- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) +- Fix substr var names and comments d581f80 (Dean Coakley) +- Fix substr documentation 2737203 (Dean Coakley) + +## Release 2.17.1 (2019-01-03) + +### Fixed + +The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml.
+
+## Release 2.17.0 (2019-01-03)
+
+### Added
+
+- adds adler32sum function and test 6908fc2 (marshallford)
+- Added kebabcase function ca331a1 (Ilyes512)
+
+### Changed
+
+- Update goutils to 1.1.0 4e1125d (Matt Butcher)
+
+### Fixed
+
+- Fix 'has' documentation e3f2a85 (dean-coakley)
+- docs(dict): fix typo in pick example dc424f9 (Dustin Specker)
+- fixes spelling errors... not sure how that happened 4cf188a (marshallford)
+
+## Release 2.16.0 (2018-08-13)
+
+### Added
+
+- add splitn function fccb0b0 (Helgi Þorbjörnsson)
+- Add slice func df28ca7 (gongdo)
+- Generate serial number a3bdffd (Cody Coons)
+- Extract values of dict with values function df39312 (Lawrence Jones)
+
+### Changed
+
+- Modify panic message for list.slice ae38335 (gongdo)
+- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap)
+- Remove duplicated documentation 1d97af1 (Matthew Fisher)
+- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson)
+
+### Fixed
+
+- Fix file permissions c5f40b5 (gongdo)
+- Fix example for buildCustomCert 7779e0d (Tin Lam)
+
+## Release 2.15.0 (2018-04-02)
+
+### Added
+
+- #68 and #69: Add json helpers to docs (thanks @arunvelsriram)
+- #66: Add ternary function (thanks @binoculars)
+- #67: Allow keys function to take multiple dicts (thanks @binoculars)
+- #89: Added sha1sum to crypto function (thanks @benkeil)
+- #81: Allow customizing the Root CA that is used by genSignedCert (thanks @chenzhiwei)
+- #92: Add travis testing for go 1.10
+- #93: Adding appveyor config for windows testing
+
+### Changed
+
+- #90: Updating to more recent dependencies
+- #73: replace satori/go.uuid with google/uuid (thanks @petterw)
+
+### Fixed
+
+- #76: Fixed documentation typos (thanks @Thiht)
+- Fixed rounding issue on the `ago` function. Note, this removes support for Go 1.8 and older
+
+## Release 2.14.1 (2017-12-01)
+
+### Fixed
+
+- #60: Fix typo in function name documentation (thanks @neil-ca-moore)
+- #61: Removing line with {{ due to blocking github pages generation
+- #64: Update the list functions to handle int, string, and other slices for compatibility
+
+## Release 2.14.0 (2017-10-06)
+
+This new version of Sprig adds a set of functions for generating and working with SSL certificates.
+
+- `genCA` generates an SSL Certificate Authority
+- `genSelfSignedCert` generates an SSL self-signed certificate
+- `genSignedCert` generates an SSL certificate and key based on a given CA
+
+## Release 2.13.0 (2017-09-18)
+
+This release adds new functions, including:
+
+- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions
+- `floor`, `ceil`, and `round` math functions
+- `toDate` converts a string to a date
+- `nindent` is just like `indent` but also prepends a new line
+- `ago` returns the time elapsed since `time.Now`
+
+### Added
+
+- #40: Added basic regex functionality (thanks @alanquillin)
+- #41: Added ceil, floor, and round functions (thanks @alanquillin)
+- #48: Added toDate function (thanks @andreynering)
+- #50: Added nindent function (thanks @binoculars)
+- #46: Added ago function (thanks @slayer)
+
+### Changed
+
+- #51: Updated godocs to include new string functions (thanks @curtisallen)
+- #49: Added ability to merge multiple dicts (thanks @binoculars)
+
+## Release 2.12.0 (2017-05-17)
+
+- `snakecase`, `camelcase`, and `shuffle` are three new string functions
+- `fail` allows you to bail out of a template render when conditions are not met
+
+## Release 2.11.0 (2017-05-02)
+
+- Added `toJson` and `toPrettyJson`
+- Added `merge`
+- Refactored documentation
+
+## Release 2.10.0 (2017-03-15)
+
+- Added `semver` and `semverCompare` for Semantic Versions
+- `list` replaces `tuple`
+- Fixed issue with `join`
+- Added `first`, `last`, `initial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`
+
+## Release 2.9.0 (2017-02-23)
+
+- Added `splitList` to split a list
+- Added the crypto functions `genPrivateKey` and `derivePassword`
+
+## Release 2.8.0 (2016-12-21)
+
+- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`)
+- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`)
+
+## Release 2.7.0 (2016-12-01)
+
+- Added `sha256sum` to generate a hash of an input
+- Added functions to convert a numeric or string to `int`, `int64`, `float64`
+
+## Release 2.6.0 (2016-10-03)
+
+- Added a `uuidv4` template function for generating UUIDs inside of a template.
+
+## Release 2.5.0 (2016-08-19)
+
+- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions
+- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`)
+- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0
+
+## Release 2.4.0 (2016-08-16)
+
+- Adds two functions: `until` and `untilStep`
+
+## Release 2.3.0 (2016-06-21)
+
+- cat: Concatenate strings with whitespace separators.
+- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First"
+- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos"
+- indent: Indent blocks of text in a way that is sensitive to "\n" characters.
+
+## Release 2.2.0 (2016-04-21)
+
+- Added a `genPrivateKey` function (Thanks @bacongobbler)
+
+## Release 2.1.0 (2016-03-30)
+
+- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`.
+- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output.
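+
+A minimal, runnable sketch of the `default` behavior described above, assuming
+the v3 module path used elsewhere in this tree:
+
+```go
+package main
+
+import (
+	"os"
+	"text/template"
+
+	sprig "github.com/go-task/slim-sprig/v3"
+)
+
+func main() {
+	// default substitutes its argument when nothing usable comes down the
+	// pipeline; .Foo is absent here, so "bar" is printed.
+	tpl := template.Must(
+		template.New("ex").Funcs(sprig.TxtFuncMap()).Parse(`{{ .Foo | default "bar" }}`),
+	)
+	_ = tpl.Execute(os.Stdout, map[string]interface{}{}) // prints: bar
+}
+```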
+
+## Release 2.0.0 (2016-03-29)
+
+Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented.
+
+- `min` complements `max` (formerly `biggest`)
+- `empty` indicates that a value is the empty value for its type
+- `tuple` creates a tuple inside of a template: `{{$t := tuple "a" "b" "c"}}`
+- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}`
+- Date formatters have been added for HTML dates (as used in `date` input fields)
+- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`).
+
+## Release 1.2.0 (2016-02-01)
+
+- Added quote and squote
+- Added b32enc and b32dec
+- add now takes varargs
+- biggest now takes varargs
+
+## Release 1.1.0 (2015-12-29)
+
+- Added #4: Added contains function. strings.Contains, but with the arguments
+  switched to simplify common pipelines. (thanks krancour)
+- Added Travis-CI testing support
+
+## Release 1.0.0 (2015-12-23)
+
+- Initial release
diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt b/go-controller/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt
new file mode 100644
index 0000000000..f311b1eaaa
--- /dev/null
+++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (C) 2013-2020 Masterminds
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/README.md b/go-controller/vendor/github.com/go-task/slim-sprig/v3/README.md
new file mode 100644
index 0000000000..b5ab564254
--- /dev/null
+++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/README.md
@@ -0,0 +1,73 @@
+# Slim-Sprig: Template functions for Go templates [![Go Reference](https://pkg.go.dev/badge/github.com/go-task/slim-sprig/v3.svg)](https://pkg.go.dev/github.com/go-task/slim-sprig/v3)
+
+Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with
+all functions that depend on external (non-standard-library) or crypto packages
+removed.
+This makes the library more lightweight. Most of these
+functions (especially the crypto ones) are not needed in most apps, but they
+cost a lot in terms of binary size and compilation time.
+
+## Usage
+
+**Template developers**: Please use Slim-Sprig's [function documentation](https://go-task.github.io/slim-sprig/) for
+detailed instructions and code snippets for the >100 template functions available.
+
+**Go developers**: If you'd like to include Slim-Sprig as a library in your program,
+our API documentation is available [on pkg.go.dev](https://pkg.go.dev/github.com/go-task/slim-sprig/v3).
+
+For standard usage, read on.
+
+### Load the Slim-Sprig library
+
+To load the Slim-Sprig `FuncMap`:
+
+```go
+import (
+	"html/template"
+
+	sprig "github.com/go-task/slim-sprig/v3"
+)
+
+// This example illustrates that the FuncMap *must* be set before the
+// templates themselves are loaded.
+tpl := template.Must(
+	template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html"),
+)
+```
+
+### Calling the functions inside of templates
+
+By convention, all functions are lowercase. This seems to follow the Go
+idiom for template functions (as opposed to template methods, which are
+TitleCase). For example, this:
+
+```
+{{ "hello!" | upper | repeat 5 }}
+```
+
+produces this:
+
+```
+HELLO!HELLO!HELLO!HELLO!HELLO!
+```
+
+## Principles Driving Our Function Selection
+
+We followed these principles to decide which functions to add and how to implement them:
+
+- Use template functions to build layout. The following
+  types of operations are within the domain of template functions:
+  - Formatting
+  - Layout
+  - Simple type conversions
+  - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic)
+- Template functions should not return errors unless there is no way to print
+  a sensible value. For example, converting a string to an integer should not
+  produce an error if conversion fails. Instead, it should display a default
+  value.
+- Simple math is necessary for grid layouts, pagers, and so on. Complex math
+  (anything other than arithmetic) should be done outside of templates.
+- Template functions only deal with the data passed into them. They never retrieve
+  data from a source.
+- Finally, do not override core Go template functions.
diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml b/go-controller/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml
new file mode 100644
index 0000000000..8e6346bb19
--- /dev/null
+++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml
@@ -0,0 +1,12 @@
+# https://taskfile.dev
+
+version: '3'
+
+tasks:
+  default:
+    cmds:
+      - task: test
+
+  test:
+    cmds:
+      - go test -v .
diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/crypto.go b/go-controller/vendor/github.com/go-task/slim-sprig/v3/crypto.go
new file mode 100644
index 0000000000..d06e516d49
--- /dev/null
+++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/crypto.go
@@ -0,0 +1,24 @@
+package sprig
+
+import (
+	"crypto/sha1"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"hash/adler32"
+)
+
+func sha256sum(input string) string {
+	hash := sha256.Sum256([]byte(input))
+	return hex.EncodeToString(hash[:])
+}
+
+func sha1sum(input string) string {
+	hash := sha1.Sum([]byte(input))
+	return hex.EncodeToString(hash[:])
+}
+
+func adler32sum(input string) string {
+	hash := adler32.Checksum([]byte(input))
+	return fmt.Sprintf("%d", hash)
+}
diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/date.go b/go-controller/vendor/github.com/go-task/slim-sprig/v3/date.go
new file mode 100644
index 0000000000..ed022ddaca
--- /dev/null
+++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/date.go
@@ -0,0 +1,152 @@
+package sprig
+
+import (
+	"strconv"
+	"time"
+)
+
+// Given a format and a date, format the date string.
+//
+// Date can be a `time.Time` or an `int, int32, int64`.
+// In the later case, it is treated as seconds since UNIX +// epoch. +func date(fmt string, date interface{}) string { + return dateInZone(fmt, date, "Local") +} + +func htmlDate(date interface{}) string { + return dateInZone("2006-01-02", date, "Local") +} + +func htmlDateInZone(date interface{}, zone string) string { + return dateInZone("2006-01-02", date, zone) +} + +func dateInZone(fmt string, date interface{}, zone string) string { + var t time.Time + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case *time.Time: + t = *date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + case int32: + t = time.Unix(int64(date), 0) + } + + loc, err := time.LoadLocation(zone) + if err != nil { + loc, _ = time.LoadLocation("UTC") + } + + return t.In(loc).Format(fmt) +} + +func dateModify(fmt string, date time.Time) time.Time { + d, err := time.ParseDuration(fmt) + if err != nil { + return date + } + return date.Add(d) +} + +func mustDateModify(fmt string, date time.Time) (time.Time, error) { + d, err := time.ParseDuration(fmt) + if err != nil { + return time.Time{}, err + } + return date.Add(d), nil +} + +func dateAgo(date interface{}) string { + var t time.Time + + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + } + // Drop resolution to seconds + duration := time.Since(t).Round(time.Second) + return duration.String() +} + +func duration(sec interface{}) string { + var n int64 + switch value := sec.(type) { + default: + n = 0 + case string: + n, _ = strconv.ParseInt(value, 10, 64) + case int64: + n = value + } + return (time.Duration(n) * time.Second).String() +} + +func durationRound(duration interface{}) string { + var d time.Duration + switch duration := duration.(type) { + default: + d = 0 + case string: + d, _ = time.ParseDuration(duration) + case int64: + d = time.Duration(duration) + case time.Time: + d = time.Since(duration) + } + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + var ( + year = uint64(time.Hour) * 24 * 365 + month = uint64(time.Hour) * 24 * 30 + day = uint64(time.Hour) * 24 + hour = uint64(time.Hour) + minute = uint64(time.Minute) + second = uint64(time.Second) + ) + switch { + case u > year: + return strconv.FormatUint(u/year, 10) + "y" + case u > month: + return strconv.FormatUint(u/month, 10) + "mo" + case u > day: + return strconv.FormatUint(u/day, 10) + "d" + case u > hour: + return strconv.FormatUint(u/hour, 10) + "h" + case u > minute: + return strconv.FormatUint(u/minute, 10) + "m" + case u > second: + return strconv.FormatUint(u/second, 10) + "s" + } + return "0s" +} + +func toDate(fmt, str string) time.Time { + t, _ := time.ParseInLocation(fmt, str, time.Local) + return t +} + +func mustToDate(fmt, str string) (time.Time, error) { + return time.ParseInLocation(fmt, str, time.Local) +} + +func unixEpoch(date time.Time) string { + return strconv.FormatInt(date.Unix(), 10) +} diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/defaults.go b/go-controller/vendor/github.com/go-task/slim-sprig/v3/defaults.go new file mode 100644 index 0000000000..b9f979666d --- /dev/null +++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/defaults.go @@ -0,0 +1,163 @@ +package sprig + +import ( + "bytes" + "encoding/json" + "math/rand" + "reflect" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// dfault checks whether 
`given` is set, and returns default if not set. +// +// This returns `d` if `given` appears not to be set, and `given` otherwise. +// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. +func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. +func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return !g.Bool() + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } +} + +// coalesce returns the first non-empty value. +func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// all returns true if empty(x) is false for all values x in the list. +// If the list is empty, return true. +func all(v ...interface{}) bool { + for _, val := range v { + if empty(val) { + return false + } + } + return true +} + +// any returns true if empty(x) is false for any x in the list. +// If the list is empty, return false. +func any(v ...interface{}) bool { + for _, val := range v { + if !empty(val) { + return true + } + } + return false +} + +// fromJson decodes JSON into a structured value, ignoring errors. +func fromJson(v string) interface{} { + output, _ := mustFromJson(v) + return output +} + +// mustFromJson decodes JSON into a structured value, returning errors. +func mustFromJson(v string) (interface{}, error) { + var output interface{} + err := json.Unmarshal([]byte(v), &output) + return output, err +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. +func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. 
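+// For example, toRawJson on map[string]string{"a": "<b>"} yields {"a":"<b>"},
+// where toJson would produce {"a":"\u003cb\u003e"}, since encoding/json
+// escapes HTML characters by default.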
+func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. +func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/dict.go b/go-controller/vendor/github.com/go-task/slim-sprig/v3/dict.go new file mode 100644 index 0000000000..77ebc61b18 --- /dev/null +++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/dict.go @@ -0,0 +1,118 @@ +package sprig + +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func values(dict map[string]interface{}) []interface{} { + values := []interface{}{} + for _, value := range dict { + values = append(values, value) + } + + return values +} + +func dig(ps ...interface{}) (interface{}, error) { + if len(ps) < 3 { + panic("dig needs at least three arguments") + } + dict := ps[len(ps)-1].(map[string]interface{}) + def := ps[len(ps)-2] + ks := make([]string, len(ps)-2) + for i := 0; i < len(ks); i++ { + ks[i] = ps[i].(string) + } + + return digFromDict(dict, def, ks) +} + +func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { + k, ns := ks[0], ks[1:len(ks)] + step, has := dict[k] + if !has { + return d, nil + } + if len(ns) == 0 { + return step, nil + } + return digFromDict(step.(map[string]interface{}), d, ns) +} diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/doc.go b/go-controller/vendor/github.com/go-task/slim-sprig/v3/doc.go new file mode 100644 index 0000000000..aabb9d4489 --- /dev/null +++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/doc.go @@ -0,0 +1,19 @@ +/* +Package sprig provides template functions for Go. 
+
+This package contains a number of utility functions for working with data
+inside of Go `html/template` and `text/template` files.
+
+To add these functions, use the `template.Funcs()` method:
+
+	t := template.New("foo").Funcs(sprig.FuncMap())
+
+Note that you should add the function map before you parse any template files.
+
+	In several cases, Sprig reverses the order of arguments from the way they
+	appear in the standard library. This is to make it easier to pipe
+	arguments into functions.
+
+See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions.
+*/
+package sprig
diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/functions.go b/go-controller/vendor/github.com/go-task/slim-sprig/v3/functions.go
new file mode 100644
index 0000000000..5ea74f8993
--- /dev/null
+++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/functions.go
@@ -0,0 +1,317 @@
+package sprig
+
+import (
+	"errors"
+	"html/template"
+	"math/rand"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"strconv"
+	"strings"
+	ttemplate "text/template"
+	"time"
+)
+
+// FuncMap produces the function map.
+//
+// Use this to pass the functions into the template engine:
+//
+//	tpl := template.New("foo").Funcs(sprig.FuncMap())
+//
+func FuncMap() template.FuncMap {
+	return HtmlFuncMap()
+}
+
+// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions.
+func HermeticTxtFuncMap() ttemplate.FuncMap {
+	r := TxtFuncMap()
+	for _, name := range nonhermeticFunctions {
+		delete(r, name)
+	}
+	return r
+}
+
+// HermeticHtmlFuncMap returns an 'html/template'.FuncMap with only repeatable functions.
+func HermeticHtmlFuncMap() template.FuncMap {
+	r := HtmlFuncMap()
+	for _, name := range nonhermeticFunctions {
+		delete(r, name)
+	}
+	return r
+}
+
+// TxtFuncMap returns a 'text/template'.FuncMap
+func TxtFuncMap() ttemplate.FuncMap {
+	return ttemplate.FuncMap(GenericFuncMap())
+}
+
+// HtmlFuncMap returns an 'html/template'.FuncMap
+func HtmlFuncMap() template.FuncMap {
+	return template.FuncMap(GenericFuncMap())
+}
+
+// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}.
+func GenericFuncMap() map[string]interface{} {
+	gfm := make(map[string]interface{}, len(genericMap))
+	for k, v := range genericMap {
+		gfm[k] = v
+	}
+	return gfm
+}
+
+// These functions are not guaranteed to evaluate to the same result for given input, because they
+// refer to the environment or global state.
+var nonhermeticFunctions = []string{
+	// Date functions
+	"date",
+	"date_in_zone",
+	"date_modify",
+	"now",
+	"htmlDate",
+	"htmlDateInZone",
+	"dateInZone",
+	"dateModify",
+
+	// Strings
+	"randAlphaNum",
+	"randAlpha",
+	"randAscii",
+	"randNumeric",
+	"randBytes",
+	"uuidv4",
+
+	// OS
+	"env",
+	"expandenv",
+
+	// Network
+	"getHostByName",
+}
+
+var genericMap = map[string]interface{}{
+	"hello": func() string { return "Hello!"
}, + + // Date functions + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. + "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "seq": seq, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. + "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, + "biggest": max, + "max": max, + "min": min, + "maxf": maxf, + "minf": minf, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. 
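+	// e.g. {{ list "a" "b" "c" | join "-" }} renders "a-b-c"; the separator
+	// comes first so that the list can be piped in.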
+ "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": os.Getenv, + "expandenv": os.ExpandEnv, + + // Network: + "getHostByName": getHostByName, + + // Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. + "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "values": values, + + "append": push, "push": push, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, +} diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/list.go b/go-controller/vendor/github.com/go-task/slim-sprig/v3/list.go new file mode 100644 index 0000000000..ca0fbb7893 --- /dev/null +++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/list.go @@ -0,0 +1,464 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. 
+ +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v), nil + + default: + return nil, fmt.Errorf("Cannot push on type %s", tp) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { + //return append([]interface{}{v}, list...) + + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func chunk(size int, list interface{}) [][]interface{} { + l, err := mustChunk(size, list) + if err != nil { + panic(err) + } + + return l +} + +func mustChunk(size int, list interface{}) ([][]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + + cs := int(math.Floor(float64(l-1)/float64(size)) + 1) + nl := make([][]interface{}, cs) + + for i := 0; i < cs; i++ { + clen := size + if i == cs-1 { + clen = int(math.Floor(math.Mod(float64(l), float64(size)))) + if clen == 0 { + clen = size + } + } + + nl[i] = make([]interface{}, clen) + + for j := 0; j < clen; j++ { + ix := i*size + j + nl[i][j] = l2.Index(ix).Interface() + } + } + + return nl, nil + + default: + return nil, fmt.Errorf("Cannot chunk type %s", tp) + } +} + +func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(l - 1).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find last on type %s", tp) + } +} + +func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(0).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find first on type %s", tp) + } +} + +func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find 
rest on type %s", tp) + } +} + +func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find initial on type %s", tp) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := sort.StringSlice(a) + s.Sort() + return s + } + return []string{strval(list)} +} + +func reverse(v interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the incoming array should not be altered. + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) + } +} + +func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot compact on type %s", tp) + } +} + +func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest, nil + default: + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) 
+ if err != nil { + panic(err) + } + + return l +} + +func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + res := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(omit, item) { + res = append(res, item) + } + } + + return res, nil + default: + return nil, fmt.Errorf("Cannot find without on type %s", tp) + } +} + +func has(needle interface{}, haystack interface{}) bool { + l, err := mustHas(needle, haystack) + if err != nil { + panic(err) + } + + return l +} + +func mustHas(needle interface{}, haystack interface{}) (bool, error) { + if haystack == nil { + return false, nil + } + tp := reflect.TypeOf(haystack).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(haystack) + var item interface{} + l := l2.Len() + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if reflect.DeepEqual(needle, item) { + return true, nil + } + } + + return false, nil + default: + return false, fmt.Errorf("Cannot find has on type %s", tp) + } +} + +// $list := [1, 2, 3, 4, 5] +// slice $list -> list[0:5] = list[:] +// slice $list 0 3 -> list[0:3] = list[:3] +// slice $list 3 5 -> list[3:5] +// slice $list 3 -> list[3:5] = list[3:] +func slice(list interface{}, indices ...interface{}) interface{} { + l, err := mustSlice(list, indices...) + if err != nil { + panic(err) + } + + return l +} + +func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + var start, end int + if len(indices) > 0 { + start = toInt(indices[0]) + } + if len(indices) < 2 { + end = l + } else { + end = toInt(indices[1]) + } + + return l2.Slice(start, end).Interface(), nil + default: + return nil, fmt.Errorf("list should be type of slice or array but %s", tp) + } +} + +func concat(lists ...interface{}) interface{} { + var res []interface{} + for _, list := range lists { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + for i := 0; i < l2.Len(); i++ { + res = append(res, l2.Index(i).Interface()) + } + default: + panic(fmt.Sprintf("Cannot concat type %s as list", tp)) + } + } + return res +} diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/network.go b/go-controller/vendor/github.com/go-task/slim-sprig/v3/network.go new file mode 100644 index 0000000000..108d78a946 --- /dev/null +++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/network.go @@ -0,0 +1,12 @@ +package sprig + +import ( + "math/rand" + "net" +) + +func getHostByName(name string) string { + addrs, _ := net.LookupHost(name) + //TODO: add error handing when release v3 comes out + return addrs[rand.Intn(len(addrs))] +} diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/numeric.go b/go-controller/vendor/github.com/go-task/slim-sprig/v3/numeric.go new file mode 100644 index 0000000000..98cbb37a19 --- /dev/null +++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/numeric.go @@ -0,0 +1,228 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "strconv" + "strings" +) + +// toFloat64 converts 64-bit floats +func toFloat64(v interface{}) float64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseFloat(str, 
64) + if err != nil { + return 0 + } + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return float64(val.Int()) + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return float64(val.Uint()) + case reflect.Uint, reflect.Uint64: + return float64(val.Uint()) + case reflect.Float32, reflect.Float64: + return val.Float() + case reflect.Bool: + if val.Bool() { + return 1 + } + return 0 + default: + return 0 + } +} + +func toInt(v interface{}) int { + //It's not optimal. Bud I don't want duplicate toInt64 code. + return int(toInt64(v)) +} + +// toInt64 converts integer types to 64-bit integers +func toInt64(v interface{}) int64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return 0 + } + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return val.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64(val.Uint()) + case reflect.Uint, reflect.Uint64: + tv := val.Uint() + if tv <= math.MaxInt64 { + return int64(tv) + } + // TODO: What is the sensible thing to do here? + return math.MaxInt64 + case reflect.Float32, reflect.Float64: + return int64(val.Float()) + case reflect.Bool: + if val.Bool() { + return 1 + } + return 0 + default: + return 0 + } +} + +func max(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb > aa { + aa = bb + } + } + return aa +} + +func maxf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Max(aa, bb) + } + return aa +} + +func min(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb < aa { + aa = bb + } + } + return aa +} + +func minf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Min(aa, bb) + } + return aa +} + +func until(count int) []int { + step := 1 + if count < 0 { + step = -1 + } + return untilStep(0, count, step) +} + +func untilStep(start, stop, step int) []int { + v := []int{} + + if stop < start { + if step >= 0 { + return v + } + for i := start; i > stop; i += step { + v = append(v, i) + } + return v + } + + if step <= 0 { + return v + } + for i := start; i < stop; i += step { + v = append(v, i) + } + return v +} + +func floor(a interface{}) float64 { + aa := toFloat64(a) + return math.Floor(aa) +} + +func ceil(a interface{}) float64 { + aa := toFloat64(a) + return math.Ceil(aa) +} + +func round(a interface{}, p int, rOpt ...float64) float64 { + roundOn := .5 + if len(rOpt) > 0 { + roundOn = rOpt[0] + } + val := toFloat64(a) + places := toFloat64(p) + + var round float64 + pow := math.Pow(10, places) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + return round / pow +} + +// converts unix octal to decimal +func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return result +} + +func seq(params ...int) string { + increment := 1 + switch len(params) { + case 0: + return "" + case 1: + start := 1 + end := params[0] + if end < start { + increment = -1 + } + return intArrayToString(untilStep(start, end+increment, increment), " ") + 
case 3: + start := params[0] + end := params[2] + step := params[1] + if end < start { + increment = -1 + if step > 0 { + return "" + } + } + return intArrayToString(untilStep(start, end+increment, step), " ") + case 2: + start := params[0] + end := params[1] + step := 1 + if end < start { + step = -1 + } + return intArrayToString(untilStep(start, end+step, step), " ") + default: + return "" + } +} + +func intArrayToString(slice []int, delimeter string) string { + return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]") +} diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/reflect.go b/go-controller/vendor/github.com/go-task/slim-sprig/v3/reflect.go new file mode 100644 index 0000000000..8a65c132f0 --- /dev/null +++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/reflect.go @@ -0,0 +1,28 @@ +package sprig + +import ( + "fmt" + "reflect" +) + +// typeIs returns true if the src is the type named in target. +func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +} diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/regex.go b/go-controller/vendor/github.com/go-task/slim-sprig/v3/regex.go new file mode 100644 index 0000000000..fab5510189 --- /dev/null +++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/regex.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + 
return []string{}, err + } + return r.Split(s, n), nil +} + +func regexQuoteMeta(s string) string { + return regexp.QuoteMeta(s) +} diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/strings.go b/go-controller/vendor/github.com/go-task/slim-sprig/v3/strings.go new file mode 100644 index 0000000000..3c62d6b6f2 --- /dev/null +++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/strings.go @@ -0,0 +1,189 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func quote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) +} + +func indent(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) +} + +func nindent(spaces int, v string) string { + return "\n" + indent(spaces, v) +} + +func replace(old, new, src string) string { + return strings.Replace(src, old, new, -1) +} + +func plural(one, many string, count int) string { + if count == 1 { + return one + } + return many +} + +func strslice(v interface{}) []string { + switch v := v.(type) { + case []string: + return v + case []interface{}: + b := make([]string, 0, len(v)) + for _, s := range v { + if s != nil { + b = append(b, strval(s)) + } + } + return b + default: + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Array, reflect.Slice: + l := val.Len() + b := make([]string, 0, l) + for i := 0; i < l; i++ { + value := val.Index(i).Interface() + if value != nil { + b = append(b, strval(value)) + } + } + return b + default: + if v == nil { + return []string{} + } + + return []string{strval(v)} + } + } +} + +func removeNilElements(v []interface{}) []interface{} { + newSlice := make([]interface{}, 0, len(v)) + for _, i := range v { + if i != nil { + newSlice = append(newSlice, i) + } + } + return newSlice +} + +func strval(v interface{}) string { + switch v := v.(type) { + case string: + return v + case []byte: + return string(v) + case error: + return v.Error() + case fmt.Stringer: + return v.String() + default: + return fmt.Sprintf("%v", v) + } +} + +func trunc(c int, s string) string { + if c < 0 && len(s)+c > 0 { + return s[len(s)+c:] + } + if c >= 0 && len(s) > c { + return s[:c] + } + return s +} + +func join(sep string, v interface{}) string { + return strings.Join(strslice(v), sep) +} + +func split(sep, orig string) map[string]string { + parts := strings.Split(orig, sep) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v 
+ } + return res +} + +func splitn(sep string, n int, orig string) map[string]string { + parts := strings.SplitN(orig, sep, n) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +// substring creates a substring of the given string. +// +// If start is < 0, this calls string[:end]. +// +// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] +// +// Otherwise, this calls string[start, end]. +func substring(start, end int, s string) string { + if start < 0 { + return s[:end] + } + if end < 0 || end > len(s) { + return s[start:] + } + return s[start:end] +} diff --git a/go-controller/vendor/github.com/go-task/slim-sprig/v3/url.go b/go-controller/vendor/github.com/go-task/slim-sprig/v3/url.go new file mode 100644 index 0000000000..b8e120e19b --- /dev/null +++ b/go-controller/vendor/github.com/go-task/slim-sprig/v3/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key] + if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedURL, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedURL.Scheme + dict["host"] = parsedURL.Host + dict["hostname"] = parsedURL.Hostname() + dict["path"] = parsedURL.Path + dict["query"] = parsedURL.RawQuery + dict["opaque"] = parsedURL.Opaque + dict["fragment"] = parsedURL.Fragment + if parsedURL.User != nil { + dict["userinfo"] = parsedURL.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string { + resURL := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo + if userinfo != "" { + tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempURL.User + } + + resURL.User = user + return resURL.String() +} diff --git a/go-controller/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md b/go-controller/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md new file mode 100644 index 0000000000..c88f9b2bdd --- /dev/null +++ b/go-controller/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md @@ -0,0 +1,50 @@ +# How to Contribute + +## Getting Started + +- Fork the repository on GitHub +- Read the [README](README.markdown) for build and test instructions +- Play with the project, submit bugs, submit patches! + +## Contribution Flow + +This is a rough outline of what a contributor's workflow looks like: + +- Create a topic branch from where you want to base your work (usually master). +- Make commits of logical units. +- Make sure your commit messages are in the proper format (see below). +- Push your changes to a topic branch in your fork of the repository. 
+- Make sure the tests pass, and add any new tests as appropriate.
+- Submit a pull request to the original repository.
+
+Thanks for your contributions!
+
+### Format of the Commit Message
+
+We follow a rough convention for commit messages that is designed to answer two
+questions: what changed and why. The subject line should feature the what and
+the body of the commit should describe the why.
+
+```
+scripts: add the test-cluster command
+
+this uses tmux to setup a test cluster that you can easily kill and
+start for debugging.
+
+Fixes #38
+```
+
+The format can be described more formally as follows:
+
+```
+<subsystem>: <what changed>
+<BLANK LINE>
+<why this change was made>
+<BLANK LINE>
+<footer>