From 63350d06443481c91cf5a7020892d18de9e55366 Mon Sep 17 00:00:00 2001
From: kubevirt-bot
Date: Tue, 22 Nov 2022 07:17:40 +0000
Subject: [PATCH] Bump kubevirtci

[021efaa Revert "bump: k8s-1.2[4,5] to cnao v0.81.0 (#895)"](https://github.com/kubevirt/kubevirtci/pull/904)
[75a155b vm based providers: Change default dns host port](https://github.com/kubevirt/kubevirtci/pull/903)
[71777d3 Support centos9 provisioner](https://github.com/kubevirt/kubevirtci/pull/896)
[3ddc5b7 Run bazelisk run //robots/cmd/kubevirtci-bumper:kubevirtci-bumper -- -ensure-last-three-minor-of v1 --k8s-provider-dir /home/prow/go/src/github.com/kubevirt/project-infra/../kubevirtci/cluster-provision/k8s](https://github.com/kubevirt/kubevirtci/pull/898)
[765bf0b kind-1.23-sriov: Remove SR-IOV operator leftovers](https://github.com/kubevirt/kubevirtci/pull/899)
[2f57cdf Create kind-1.23-sriov provider](https://github.com/kubevirt/kubevirtci/pull/897)
[dd31ea6 bump: k8s-1.2[4,5] to cnao v0.81.0](https://github.com/kubevirt/kubevirtci/pull/895)
[fa031f8 Bump default provider version](https://github.com/kubevirt/kubevirtci/pull/894)
[8cca8c0 vm based providers: Expose a UDP port for DNS](https://github.com/kubevirt/kubevirtci/pull/867)
[2fea446 kind-1.22-sriov, provider.sh: Remove unused SRIOV_TESTS_NS variable](https://github.com/kubevirt/kubevirtci/pull/891)

```release-note
NONE
```

Signed-off-by: kubevirt-bot
---
 cluster-up-sha.txt                                 |   2 +-
 .../cluster/ephemeral-provider-common.sh           |   4 +
 .../cluster/kind-1.22-sriov/provider.sh            |   4 -
 cluster-up/cluster/kind-1.23-sriov/OWNERS          |  10 +
 cluster-up/cluster/kind-1.23-sriov/README.md       | 101 ++++++++
 .../cluster/kind-1.23-sriov/TROUBLESHOOTING.md     |  60 +++++
 .../kind-1.23-sriov/config_sriov_cluster.sh        |  73 ++++++
 .../cluster/kind-1.23-sriov/conformance.json       |  47 ++++
 .../cluster/kind-1.23-sriov/provider.sh            |  69 ++++++
 .../sriov-components/manifests/kustomization.yaml  |  27 +++
 .../manifests/multus/kustomization.yaml            |  14 ++
 .../sriov-components/manifests/multus/multus.yaml  | 206 ++++++++++++++++
 .../manifests/multus/patch-args.yaml               |   6 +
 .../manifests/patch-node-selector.yaml.in          |   3 +
 .../patch-sriovdp-resource-prefix.yaml.in          |   3 +
 .../manifests/sriov-cni-daemonset.yaml             |  51 ++++
 .../sriov-components/manifests/sriov-ns.yaml       |   4 +
 .../manifests/sriovdp-config.yaml.in               |  17 ++
 .../manifests/sriovdp-daemonset.yaml               | 221 ++++++++++++++++++
 .../sriov-components/sriov_components.sh           | 212 +++++++++++++++++
 .../kind-1.23-sriov/sriov-node/configure_vfs.sh    | 103 ++++++++
 .../cluster/kind-1.23-sriov/sriov-node/node.sh     | 124 ++++++++++
 .../cluster/kind-1.23-sriov/sriovdp_setup.sh       |  42 ++++
 cluster-up/hack/common.sh                          |   4 +-
 cluster-up/version.txt                             |   2 +-
 hack/config-default.sh                             |   2 +-
 26 files changed, 1402 insertions(+), 9 deletions(-)
 create mode 100644 cluster-up/cluster/kind-1.23-sriov/OWNERS
 create mode 100644 cluster-up/cluster/kind-1.23-sriov/README.md
 create mode 100644 cluster-up/cluster/kind-1.23-sriov/TROUBLESHOOTING.md
 create mode 100755 cluster-up/cluster/kind-1.23-sriov/config_sriov_cluster.sh
 create mode 100644 cluster-up/cluster/kind-1.23-sriov/conformance.json
 create mode 100755 cluster-up/cluster/kind-1.23-sriov/provider.sh
 create mode 100644 cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/kustomization.yaml
 create mode 100644 cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/multus/kustomization.yaml
 create mode 100644
cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/multus/multus.yaml create mode 100644 cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/multus/patch-args.yaml create mode 100644 cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/patch-node-selector.yaml.in create mode 100644 cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/patch-sriovdp-resource-prefix.yaml.in create mode 100644 cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/sriov-cni-daemonset.yaml create mode 100644 cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/sriov-ns.yaml create mode 100644 cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/sriovdp-config.yaml.in create mode 100644 cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/sriovdp-daemonset.yaml create mode 100644 cluster-up/cluster/kind-1.23-sriov/sriov-components/sriov_components.sh create mode 100755 cluster-up/cluster/kind-1.23-sriov/sriov-node/configure_vfs.sh create mode 100644 cluster-up/cluster/kind-1.23-sriov/sriov-node/node.sh create mode 100755 cluster-up/cluster/kind-1.23-sriov/sriovdp_setup.sh diff --git a/cluster-up-sha.txt b/cluster-up-sha.txt index a2882093695d..60c605d192b9 100644 --- a/cluster-up-sha.txt +++ b/cluster-up-sha.txt @@ -1 +1 @@ -207c456c3455dcd428406a332442d41bd1f96f76 +dd55172fb289e08cea5bba642e156b39ec6c8cab diff --git a/cluster-up/cluster/ephemeral-provider-common.sh b/cluster-up/cluster/ephemeral-provider-common.sh index 6d0ae8112e70..9d010e6d4bd7 100644 --- a/cluster-up/cluster/ephemeral-provider-common.sh +++ b/cluster-up/cluster/ephemeral-provider-common.sh @@ -4,6 +4,7 @@ set -e KUBEVIRT_WITH_ETC_IN_MEMORY=${KUBEVIRT_WITH_ETC_IN_MEMORY:-false} KUBEVIRT_WITH_ETC_CAPACITY=${KUBEVIRT_WITH_ETC_CAPACITY:-none} +KUBEVIRT_DNS_HOST_PORT=${KUBEVIRT_DNS_HOST_PORT:-31111} export KUBEVIRTCI_PODMAN_SOCKET=${KUBEVIRTCI_PODMAN_SOCKET:-"/run/podman/podman.sock"} @@ -87,6 +88,9 @@ function _registry_volume() { function _add_common_params() { # shellcheck disable=SC2155 local params="--nodes ${KUBEVIRT_NUM_NODES} --memory ${KUBEVIRT_MEMORY_SIZE} --cpu 6 --secondary-nics ${KUBEVIRT_NUM_SECONDARY_NICS} --random-ports --background --prefix $provider_prefix ${KUBEVIRT_PROVIDER} ${KUBEVIRT_PROVIDER_EXTRA_ARGS}" + + params=" --dns-port $KUBEVIRT_DNS_HOST_PORT $params" + if [[ $TARGET =~ windows_sysprep.* ]] && [ -n "$WINDOWS_SYSPREP_NFS_DIR" ]; then params=" --nfs-data $WINDOWS_SYSPREP_NFS_DIR $params" elif [[ $TARGET =~ windows.* ]] && [ -n "$WINDOWS_NFS_DIR" ]; then diff --git a/cluster-up/cluster/kind-1.22-sriov/provider.sh b/cluster-up/cluster/kind-1.22-sriov/provider.sh index 9faf391c7894..f20a145436c5 100755 --- a/cluster-up/cluster/kind-1.22-sriov/provider.sh +++ b/cluster-up/cluster/kind-1.22-sriov/provider.sh @@ -13,10 +13,6 @@ else export HOST_PORT=$ALTERNATE_HOST_PORT fi -#'kubevirt-test-default1' is the default namespace of -# Kubevirt SRIOV tests where the SRIOV VM's will be created. 
-SRIOV_TESTS_NS="${SRIOV_TESTS_NS:-kubevirt-test-default1}"
-
 function set_kind_params() {
     export KIND_VERSION="${KIND_VERSION:-0.11.1}"
     export KIND_NODE_IMAGE="${KIND_NODE_IMAGE:-quay.io/kubevirtci/kindest_node:v1.22.2@sha256:f638a08c1f68fe2a99e724ace6df233a546eaf6713019a0b310130a4f91ebe7f}"
diff --git a/cluster-up/cluster/kind-1.23-sriov/OWNERS b/cluster-up/cluster/kind-1.23-sriov/OWNERS
new file mode 100644
index 000000000000..786c313b2c51
--- /dev/null
+++ b/cluster-up/cluster/kind-1.23-sriov/OWNERS
@@ -0,0 +1,10 @@
+filters:
+  ".*":
+    reviewers:
+      - qinqon
+      - oshoval
+      - phoracek
+      - ormergi
+    approvers:
+      - qinqon
+      - phoracek
diff --git a/cluster-up/cluster/kind-1.23-sriov/README.md b/cluster-up/cluster/kind-1.23-sriov/README.md
new file mode 100644
index 000000000000..e16a73b7ed33
--- /dev/null
+++ b/cluster-up/cluster/kind-1.23-sriov/README.md
@@ -0,0 +1,101 @@
+# K8S 1.23.13 with SR-IOV in a Kind cluster
+
+Provides a pre-deployed containerized k8s cluster with version 1.23.13 that runs
+using [KinD](https://github.com/kubernetes-sigs/kind).
+The cluster is completely ephemeral and is recreated on every cluster restart.
+The KubeVirt containers are built on the local machine and are then pushed to a
+registry which is exposed at `localhost:5000`.
+
+This version also expects SR-IOV-enabled NICs (SR-IOV Physical Functions) on the
+current host, and will move physical interfaces into the KinD cluster's worker
+node(s) so that they can be used through Multus and the SR-IOV components.
+
+This provider also deploys [multus](https://github.com/k8snetworkplumbingwg/multus-cni),
+[sriov-cni](https://github.com/k8snetworkplumbingwg/sriov-cni)
+and [sriov-device-plugin](https://github.com/k8snetworkplumbingwg/sriov-network-device-plugin).
+
+## Bringing the cluster up
+
+```bash
+export KUBEVIRT_PROVIDER=kind-1.23-sriov
+export KUBEVIRT_NUM_NODES=3
+make cluster-up
+
+$ cluster-up/kubectl.sh get nodes
+NAME                  STATUS   ROLES                  AGE   VERSION
+sriov-control-plane   Ready    control-plane,master   20h   v1.23.13
+sriov-worker          Ready    worker                 20h   v1.23.13
+sriov-worker2         Ready    worker                 20h   v1.23.13
+
+$ cluster-up/kubectl.sh get pods -n kube-system -l app=multus
+NAME                         READY   STATUS    RESTARTS   AGE
+kube-multus-ds-amd64-d45n4   1/1     Running   0          20h
+kube-multus-ds-amd64-g26xh   1/1     Running   0          20h
+kube-multus-ds-amd64-mfh7c   1/1     Running   0          20h
+
+$ cluster-up/kubectl.sh get pods -n sriov -l app=sriov-cni
+NAME                            READY   STATUS    RESTARTS   AGE
+kube-sriov-cni-ds-amd64-fv5cr   1/1     Running   0          20h
+kube-sriov-cni-ds-amd64-q95q9   1/1     Running   0          20h
+
+$ cluster-up/kubectl.sh get pods -n sriov -l app=sriovdp
+NAME                                   READY   STATUS    RESTARTS   AGE
+kube-sriov-device-plugin-amd64-h7h84   1/1     Running   0          20h
+kube-sriov-device-plugin-amd64-xrr5z   1/1     Running   0          20h
+```
+
+## Bringing the cluster down
+
+```bash
+export KUBEVIRT_PROVIDER=kind-1.23-sriov
+make cluster-down
+```
+
+This destroys the whole cluster and moves the SR-IOV NICs back to the root network namespace.
+
+## Setting a custom kind version
+
+In order to use a custom kind image / kind version, export `KIND_NODE_IMAGE`,
+`KIND_VERSION` and `KUBECTL_PATH` before running cluster-up. For example, to use
+kind 0.9.0 (which is based on k8s-1.19.1):
+
+```bash
+export KIND_NODE_IMAGE="kindest/node:v1.19.1@sha256:98cf5288864662e37115e362b23e4369c8c4a408f99cbc06e58ac30ddc721600"
+export KIND_VERSION="0.9.0"
+export KUBECTL_PATH="/usr/bin/kubectl"
+```
+
+This allows users to test or use custom images / different kind versions before making them official.
+See https://github.com/kubernetes-sigs/kind/releases for details about which node image matches each kind version.
+
+## Running multi SR-IOV clusters locally
+
+The kubevirtci SR-IOV provider supports running two clusters side by side, with a few known limitations.
+
+General considerations:
+
+- An SR-IOV PF must be available for each cluster. There are two ways to achieve that:
+  1. Assign just one PF to each worker node of each cluster by using `export PF_COUNT_PER_NODE=1` (this is the default
+     value).
+  2. Blacklist the unused PFs with `export PF_BLACKLIST=` in order to prevent them from being allocated to the current
+     cluster. The user can list the PFs that should not be allocated to the current cluster, keeping in mind that at
+     least one (or two, in case of migration) should not be listed, so they can be allocated to the current cluster.
+     Note: another reason to blacklist a PF is that it has a defect, or should be kept for other operations (for
+     example sniffing).
+- Clusters should be created one after another and not in parallel (to avoid races over SR-IOV PFs).
+- The cluster names must be different. This can be achieved by setting `export CLUSTER_NAME=sriov2` on the second
+  cluster; the default `CLUSTER_NAME` is `sriov`. The second cluster's registry is exposed at `localhost:5001`
+  automatically once `CLUSTER_NAME` is set to a non-default value (see the sketch after this list).
+- Each cluster should be created in its own git clone folder, e.g.:
+  `/root/project/kubevirtci1`
+  `/root/project/kubevirtci2`
+  In order to switch between them, change into the respective folder and set the env variables `KUBECONFIG`
+  and `KUBEVIRT_PROVIDER`.
+- In case only one PF exists, for example when running on Prow (which assigns only one PF per job in its own DinD),
+  kubevirtci is agnostic and nothing needs to be done, since all conditions above are met.
+- The upper limit on the number of clusters that can run at the same time equals the number of PFs divided by the
+  number of PFs per cluster; therefore, in case there is only one PF, only one cluster can be created. Locally, the
+  actual limit currently supported is two clusters.
+- In order to use `make cluster-down`, please make sure the right `CLUSTER_NAME` is exported.
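+
+A minimal sketch for bringing up the second cluster, assuming enough spare PFs are available on the host and the
+considerations above are met:
+
+```bash
+cd /root/project/kubevirtci2           # the second clone
+export KUBEVIRT_PROVIDER=kind-1.23-sriov
+export CLUSTER_NAME=sriov2             # registry is exposed at localhost:5001
+make cluster-up
+```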
diff --git a/cluster-up/cluster/kind-1.23-sriov/TROUBLESHOOTING.md b/cluster-up/cluster/kind-1.23-sriov/TROUBLESHOOTING.md
new file mode 100644
index 000000000000..7b699427a410
--- /dev/null
+++ b/cluster-up/cluster/kind-1.23-sriov/TROUBLESHOOTING.md
@@ -0,0 +1,60 @@
+# How to troubleshoot a failing kind job
+
+If logging and output artifacts are not enough, there is a way to connect to a running CI pod and troubleshoot directly from there.
+
+## Prerequisites
+
+- A working (enabled) account on the [CI cluster](shift.ovirt.org), specifically enabled for the `kubevirt-prow-jobs` project.
+- The [mkpj tool](https://github.com/kubernetes/test-infra/tree/master/prow/cmd/mkpj) installed
+
+## Launching a custom job
+
+Through the `mkpj` tool, it's possible to craft a custom Prow Job that can be executed on the CI cluster.
+
+Just `go get` it by running `go get k8s.io/test-infra/prow/cmd/mkpj`
+
+Then run the following command from a checkout of the [project-infra repo](https://github.com/kubevirt/project-infra):
+
+```bash
+mkpj --pull-number $KUBEVIRTPRNUMBER -job pull-kubevirt-e2e-kind-k8s-sriov-1.17.0 -job-config-path github/ci/prow/files/jobs/kubevirt/kubevirt-presubmits.yaml --config-path github/ci/prow/files/config.yaml > debugkind.yaml
+```
+
+You will end up with a ProwJob manifest in the `debugkind.yaml` file.
+
+It's strongly recommended to replace the job's name (`metadata.name`) with something more recognizable, as that makes it easier to find and debug the corresponding pod.
+
+`$KUBEVIRTPRNUMBER` can be an actual PR on the [kubevirt repo](https://github.com/kubevirt/kubevirt).
+
+In case we just want to debug the cluster provided by the CI, it's recommended to override the entry point, either in the test PR we are instrumenting (a good sample can be found [here](https://github.com/kubevirt/kubevirt/pull/3022)), or by overriding the entry point directly in the Prow Job's manifest.
+
+Remember that we want the cluster to be long-lived, so a long sleep must be provided as part of the entry point.
+
+Make sure you switch to the `kubevirt-prow-jobs` project, and apply the manifest:
+
+```bash
+kubectl apply -f debugkind.yaml
+```
+
+You will end up with a ProwJob object, and a pod with the same name you gave to the ProwJob.
+
+Once the pod is up & running, connect to it via bash:
+
+```bash
+kubectl exec -it debugprowjobpod bash
+```
+
+### Logistics
+
+Once you are in the pod, you'll be able to troubleshoot what's happening in the environment where CI runs its tests.
+
+Run the following to bring up a [kind](https://github.com/kubernetes-sigs/kind) cluster with a single-node setup and the SR-IOV operator already set up (if it wasn't already done by the job itself):
+
+```bash
+KUBEVIRT_PROVIDER=kind-k8s-sriov-1.17.0 make cluster-up
+```
+
+The kubeconfig file will be available under `/root/.kube/kind-config-sriov`.
+
+The `kubectl` binary is already on board and in `$PATH`.
+
+The container acting as node is the one named `sriov-control-plane`. You can even see what's in there by running `docker exec -it sriov-control-plane bash`.
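+
+For example, a quick look at the node's SR-IOV devices from outside the container (illustrative; these are the same commands the provider itself runs when printing SR-IOV data):
+
+```bash
+docker exec sriov-control-plane bash -c "ls -l /sys/class/net/*/device/virtfn*"
+docker exec sriov-control-plane bash -c "grep PCI_SLOT_NAME /sys/class/net/*/device/uevent"
+```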
diff --git a/cluster-up/cluster/kind-1.23-sriov/config_sriov_cluster.sh b/cluster-up/cluster/kind-1.23-sriov/config_sriov_cluster.sh
new file mode 100755
index 000000000000..af53bb91a569
--- /dev/null
+++ b/cluster-up/cluster/kind-1.23-sriov/config_sriov_cluster.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+[ $(id -u) -ne 0 ] && echo "FATAL: this script requires sudo privileges" >&2 && exit 1
+
+set -xe
+
+PF_COUNT_PER_NODE=${PF_COUNT_PER_NODE:-1}
+[ $PF_COUNT_PER_NODE -le 0 ] && echo "FATAL: PF_COUNT_PER_NODE must be a positive integer" >&2 && exit 1
+
+SCRIPT_PATH=$(dirname "$(realpath "$0")")
+
+source ${SCRIPT_PATH}/sriov-node/node.sh
+source ${SCRIPT_PATH}/sriov-components/sriov_components.sh
+
+CONFIGURE_VFS_SCRIPT_PATH="$SCRIPT_PATH/sriov-node/configure_vfs.sh"
+
+SRIOV_COMPONENTS_NAMESPACE="sriov"
+SRIOV_NODE_LABEL_KEY="sriov_capable"
+SRIOV_NODE_LABEL_VALUE="true"
+SRIOV_NODE_LABEL="$SRIOV_NODE_LABEL_KEY=$SRIOV_NODE_LABEL_VALUE"
+SRIOVDP_RESOURCE_PREFIX="kubevirt.io"
+SRIOVDP_RESOURCE_NAME="sriov_net"
+VFS_DRIVER="vfio-pci"
+VFS_DRIVER_KMODULE="vfio_pci"
+VFS_COUNT="6"
+
+function validate_nodes_sriov_allocatable_resource() {
+    local -r resource_name="$SRIOVDP_RESOURCE_PREFIX/$SRIOVDP_RESOURCE_NAME"
+    local -r sriov_nodes=$(_kubectl get nodes -l $SRIOV_NODE_LABEL -o custom-columns=:.metadata.name --no-headers)
+
+    local num_vfs
+    for sriov_node in $sriov_nodes; do
+        num_vfs=$(node::total_vfs_count "$sriov_node")
+        sriov_components::wait_allocatable_resource "$sriov_node" "$resource_name" "$num_vfs"
+    done
+}
+
+worker_nodes=($(_kubectl get nodes -l node-role.kubernetes.io/worker -o custom-columns=:.metadata.name --no-headers))
+worker_nodes_count=${#worker_nodes[@]}
+[ "$worker_nodes_count" -eq 0 ] && echo "FATAL: no worker nodes found" >&2 && exit 1
+
+pfs_names=($(node::discover_host_pfs))
+pf_count="${#pfs_names[@]}"
+[ "$pf_count" -eq 0 ] && echo "FATAL: Could not find available SR-IOV PFs" >&2 && exit 1
+
+total_pf_required=$((worker_nodes_count*PF_COUNT_PER_NODE))
+[ "$pf_count" -lt "$total_pf_required" ] && \
+    echo "FATAL: there are not enough PFs on the host, try to reduce PF_COUNT_PER_NODE
+    Worker nodes count: $worker_nodes_count
+    PF per node count: $PF_COUNT_PER_NODE
+    Total PF count required: $total_pf_required" >&2 && exit 1
+
+## Move SR-IOV Physical Functions to worker nodes
+PFS_IN_USE=""
+node::configure_sriov_pfs "${worker_nodes[*]}" "${pfs_names[*]}" "$PF_COUNT_PER_NODE" "PFS_IN_USE"
+
+## Create VFs and configure their drivers on each SR-IOV node
+node::configure_sriov_vfs "${worker_nodes[*]}" "$VFS_DRIVER" "$VFS_DRIVER_KMODULE" "$VFS_COUNT"
+
+## Deploy Multus and SR-IOV components
+sriov_components::deploy_multus
+sriov_components::deploy \
+    "$PFS_IN_USE" \
+    "$VFS_DRIVER" \
+    "$SRIOVDP_RESOURCE_PREFIX" "$SRIOVDP_RESOURCE_NAME" \
+    "$SRIOV_NODE_LABEL_KEY" "$SRIOV_NODE_LABEL_VALUE"
+
+# Verify that each SR-IOV capable node has the SR-IOV VFs allocatable resource
+validate_nodes_sriov_allocatable_resource
+sriov_components::wait_pods_ready
+
+_kubectl get nodes
+_kubectl get pods -n $SRIOV_COMPONENTS_NAMESPACE
diff --git a/cluster-up/cluster/kind-1.23-sriov/conformance.json b/cluster-up/cluster/kind-1.23-sriov/conformance.json
new file mode 100644
index 000000000000..2ff6e83a5bda
--- /dev/null
+++ b/cluster-up/cluster/kind-1.23-sriov/conformance.json
@@ -0,0 +1,47 @@
+{
+  "Description": "DEFAULT",
+  "UUID": "",
+  "Version": "v0.56.9",
+  "ResultsDir": "/tmp/sonobuoy/results",
+  "Resources": null,
+  "Filters": {
+    "Namespaces": ".*",
+    "LabelSelector": ""
+  },
+  "Limits": {
"PodLogs": { + "Namespaces": "kube-system", + "SonobuoyNamespace": true, + "FieldSelectors": [], + "LabelSelector": "", + "Previous": false, + "SinceSeconds": null, + "SinceTime": null, + "Timestamps": false, + "TailLines": null, + "LimitBytes": null + } + }, + "QPS": 30, + "Burst": 50, + "Server": { + "bindaddress": "0.0.0.0", + "bindport": 8080, + "advertiseaddress": "", + "timeoutseconds": 21600 + }, + "Plugins": null, + "PluginSearchPath": [ + "./plugins.d", + "/etc/sonobuoy/plugins.d", + "~/sonobuoy/plugins.d" + ], + "Namespace": "sonobuoy", + "WorkerImage": "sonobuoy/sonobuoy:v0.56.9", + "ImagePullPolicy": "IfNotPresent", + "ImagePullSecrets": "", + "AggregatorPermissions": "clusterAdmin", + "ServiceAccountName": "sonobuoy-serviceaccount", + "ProgressUpdatesPort": "8099", + "SecurityContextMode": "nonroot" +} diff --git a/cluster-up/cluster/kind-1.23-sriov/provider.sh b/cluster-up/cluster/kind-1.23-sriov/provider.sh new file mode 100755 index 000000000000..4452c351164b --- /dev/null +++ b/cluster-up/cluster/kind-1.23-sriov/provider.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +set -e + +DEFAULT_CLUSTER_NAME="sriov" +DEFAULT_HOST_PORT=5000 +ALTERNATE_HOST_PORT=5001 +export CLUSTER_NAME=${CLUSTER_NAME:-$DEFAULT_CLUSTER_NAME} + +if [ $CLUSTER_NAME == $DEFAULT_CLUSTER_NAME ]; then + export HOST_PORT=$DEFAULT_HOST_PORT +else + export HOST_PORT=$ALTERNATE_HOST_PORT +fi + +function set_kind_params() { + export KIND_VERSION="${KIND_VERSION:-0.17.0}" + export KIND_NODE_IMAGE="${KIND_NODE_IMAGE:-quay.io/kubevirtci/kindest-node:v1.23.13@sha256:ef453bb7c79f0e3caba88d2067d4196f427794086a7d0df8df4f019d5e336b61}" + export KUBECTL_PATH="${KUBECTL_PATH:-/bin/kubectl}" +} + +function print_sriov_data() { + nodes=$(_kubectl get nodes -o=custom-columns=:.metadata.name | awk NF) + for node in $nodes; do + if [[ ! "$node" =~ .*"control-plane".* ]]; then + echo "Node: $node" + echo "VFs:" + ${CRI_BIN} exec $node bash -c "ls -l /sys/class/net/*/device/virtfn*" + echo "PFs PCI Addresses:" + ${CRI_BIN} exec $node bash -c "grep PCI_SLOT_NAME /sys/class/net/*/device/uevent" + fi + done +} + +function configure_registry_proxy() { + [ "$CI" != "true" ] && return + + echo "Configuring cluster nodes to work with CI mirror-proxy..." 
+ + local -r ci_proxy_hostname="docker-mirror-proxy.kubevirt-prow.svc" + local -r kind_binary_path="${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kind" + local -r configure_registry_proxy_script="${KUBEVIRTCI_PATH}/cluster/kind/configure-registry-proxy.sh" + + KIND_BIN="$kind_binary_path" PROXY_HOSTNAME="$ci_proxy_hostname" $configure_registry_proxy_script +} + +function up() { + # print hardware info for easier debugging based on logs + echo 'Available NICs' + ${CRI_BIN} run --rm --cap-add=SYS_RAWIO quay.io/phoracek/lspci@sha256:0f3cacf7098202ef284308c64e3fc0ba441871a846022bb87d65ff130c79adb1 sh -c "lspci | egrep -i 'network|ethernet'" + echo "" + + cp $KIND_MANIFESTS_DIR/kind.yaml ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml + kind_up + + configure_registry_proxy + + # remove the rancher.io kind default storageClass + _kubectl delete sc standard + + ${KUBEVIRTCI_PATH}/cluster/$KUBEVIRT_PROVIDER/config_sriov_cluster.sh + + print_sriov_data + echo "$KUBEVIRT_PROVIDER cluster '$CLUSTER_NAME' is ready" +} + +set_kind_params + +source ${KUBEVIRTCI_PATH}/cluster/kind/common.sh diff --git a/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/kustomization.yaml b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/kustomization.yaml new file mode 100644 index 000000000000..0c1caec1622a --- /dev/null +++ b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/kustomization.yaml @@ -0,0 +1,27 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: sriov +resources: +- sriov-ns.yaml +- sriov-cni-daemonset.yaml +- sriovdp-daemonset.yaml +- sriovdp-config.yaml +patchesJson6902: +- target: + group: apps + version: v1 + kind: DaemonSet + name: kube-sriov-cni-ds-amd64 + path: patch-node-selector.yaml +- target: + group: apps + version: v1 + kind: DaemonSet + name: kube-sriov-device-plugin-amd64 + path: patch-node-selector.yaml +- target: + group: apps + version: v1 + kind: DaemonSet + name: kube-sriov-device-plugin-amd64 + path: patch-sriovdp-resource-prefix.yaml diff --git a/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/multus/kustomization.yaml b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/multus/kustomization.yaml new file mode 100644 index 000000000000..657061704b24 --- /dev/null +++ b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/multus/kustomization.yaml @@ -0,0 +1,14 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- multus.yaml +images: +- name: ghcr.io/k8snetworkplumbingwg/multus-cni + newTag: v3.8 +patchesJson6902: +- path: patch-args.yaml + target: + group: apps + version: v1 + kind: DaemonSet + name: kube-multus-ds diff --git a/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/multus/multus.yaml b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/multus/multus.yaml new file mode 100644 index 000000000000..4b6b950d8597 --- /dev/null +++ b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/multus/multus.yaml @@ -0,0 +1,206 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: network-attachment-definitions.k8s.cni.cncf.io +spec: + group: k8s.cni.cncf.io + scope: Namespaced + names: + plural: network-attachment-definitions + singular: network-attachment-definition + kind: NetworkAttachmentDefinition + shortNames: + - net-attach-def + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + description: 'NetworkAttachmentDefinition is a CRD schema 
specified by the Network Plumbing + Working Group to express the intent for attaching pods to one or more logical or physical + networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this represen + tation of an object. Servers should convert recognized schemas to the + latest internal value, and may reject unrecognized values. More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment' + type: object + properties: + config: + description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration' + type: string +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +rules: + - apiGroups: ["k8s.cni.cncf.io"] + resources: + - '*' + verbs: + - '*' + - apiGroups: + - "" + resources: + - pods + - pods/status + verbs: + - get + - update + - apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: multus +subjects: +- kind: ServiceAccount + name: multus + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: multus + namespace: kube-system +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: multus-cni-config + namespace: kube-system + labels: + tier: node + app: multus +data: + # NOTE: If you'd prefer to manually apply a configuration file, you may create one here. + # In the case you'd like to customize the Multus installation, you should change the arguments to the Multus pod + # change the "args" line below from + # - "--multus-conf-file=auto" + # to: + # "--multus-conf-file=/tmp/multus-conf/70-multus.conf" + # Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the + # /etc/cni/net.d/ directory on each node, otherwise, it will not be used by the Kubelet. 
+ cni-conf.json: | + { + "name": "multus-cni-network", + "type": "multus", + "capabilities": { + "portMappings": true + }, + "delegates": [ + { + "cniVersion": "0.3.1", + "name": "default-cni-network", + "plugins": [ + { + "type": "flannel", + "name": "flannel.1", + "delegate": { + "isDefaultGateway": true, + "hairpinMode": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + ], + "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig" + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-multus-ds + namespace: kube-system + labels: + tier: node + app: multus + name: multus +spec: + selector: + matchLabels: + name: multus + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + tier: node + app: multus + name: multus + spec: + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: multus + containers: + - name: kube-multus + image: ghcr.io/k8snetworkplumbingwg/multus-cni:stable + command: ["/entrypoint.sh"] + args: + - "--multus-conf-file=auto" + - "--cni-version=0.3.1" + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: true + volumeMounts: + - name: cni + mountPath: /host/etc/cni/net.d + - name: cnibin + mountPath: /host/opt/cni/bin + - name: multus-cfg + mountPath: /tmp/multus-conf + terminationGracePeriodSeconds: 10 + volumes: + - name: cni + hostPath: + path: /etc/cni/net.d + - name: cnibin + hostPath: + path: /opt/cni/bin + - name: multus-cfg + configMap: + name: multus-cni-config + items: + - key: cni-conf.json + path: 70-multus.conf diff --git a/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/multus/patch-args.yaml b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/multus/patch-args.yaml new file mode 100644 index 000000000000..ea9cd109232e --- /dev/null +++ b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/multus/patch-args.yaml @@ -0,0 +1,6 @@ +- op: add + path: /spec/template/spec/containers/0/args/- + value: "--multus-log-level=debug" +- op: add + path: /spec/template/spec/containers/0/args/- + value: "--multus-log-file=/var/log/multus.log" diff --git a/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/patch-node-selector.yaml.in b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/patch-node-selector.yaml.in new file mode 100644 index 000000000000..0117c8cdd5be --- /dev/null +++ b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/patch-node-selector.yaml.in @@ -0,0 +1,3 @@ +- op: add + path: /spec/template/spec/nodeSelector/$LABEL_KEY + value: "$LABEL_VALUE" diff --git a/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/patch-sriovdp-resource-prefix.yaml.in b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/patch-sriovdp-resource-prefix.yaml.in new file mode 100644 index 000000000000..563e606a9f58 --- /dev/null +++ b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/patch-sriovdp-resource-prefix.yaml.in @@ -0,0 +1,3 @@ +- op: add + path: /spec/template/spec/containers/0/args/-1 + value: --resource-prefix=$RESOURCE_PREFIX diff --git a/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/sriov-cni-daemonset.yaml b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/sriov-cni-daemonset.yaml new file mode 100644 index 000000000000..abcb8c647cdb --- /dev/null +++ b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/sriov-cni-daemonset.yaml 
@@ -0,0 +1,51 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-sriov-cni-ds-amd64 + namespace: kube-system + labels: + tier: node + app: sriov-cni +spec: + selector: + matchLabels: + name: sriov-cni + template: + metadata: + labels: + name: sriov-cni + tier: node + app: sriov-cni + spec: + nodeSelector: + kubernetes.io/arch: amd64 + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + containers: + - name: kube-sriov-cni + image: ghcr.io/k8snetworkplumbingwg/sriov-cni:v2.6.2 + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + volumeMounts: + - name: cnibin + mountPath: /host/opt/cni/bin + volumes: + - name: cnibin + hostPath: + path: /opt/cni/bin diff --git a/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/sriov-ns.yaml b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/sriov-ns.yaml new file mode 100644 index 000000000000..bfe55b30d92e --- /dev/null +++ b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/sriov-ns.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: sriov diff --git a/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/sriovdp-config.yaml.in b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/sriovdp-config.yaml.in new file mode 100644 index 000000000000..5e9788168111 --- /dev/null +++ b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/sriovdp-config.yaml.in @@ -0,0 +1,17 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: sriovdp-config + namespace: kube-system +data: + config.json: | + { + "resourceList": [{ + "resourceName": "$RESOURCE_NAME", + "selectors": { + "drivers": $DRIVERS, + "pfNames": $PF_NAMES + } + }] + } diff --git a/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/sriovdp-daemonset.yaml b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/sriovdp-daemonset.yaml new file mode 100644 index 000000000000..322a22397742 --- /dev/null +++ b/cluster-up/cluster/kind-1.23-sriov/sriov-components/manifests/sriovdp-daemonset.yaml @@ -0,0 +1,221 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: sriov-device-plugin + namespace: kube-system + +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-sriov-device-plugin-amd64 + namespace: kube-system + labels: + tier: node + app: sriovdp +spec: + selector: + matchLabels: + name: sriov-device-plugin + template: + metadata: + labels: + name: sriov-device-plugin + tier: node + app: sriovdp + spec: + hostNetwork: true + nodeSelector: + beta.kubernetes.io/arch: amd64 + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + serviceAccountName: sriov-device-plugin + containers: + - name: kube-sriovdp + image: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:v3.4.0 + imagePullPolicy: IfNotPresent + args: + - --log-dir=sriovdp + - --log-level=10 + securityContext: + privileged: true + resources: + requests: + cpu: "250m" + memory: "40Mi" + limits: + cpu: 1 + memory: "200Mi" + volumeMounts: + - name: devicesock + mountPath: /var/lib/kubelet/ + readOnly: false + - name: log + mountPath: /var/log + - name: config-volume + mountPath: /etc/pcidp + - name: device-info + mountPath: /var/run/k8s.cni.cncf.io/devinfo/dp + volumes: + - name: devicesock + 
hostPath: + path: /var/lib/kubelet/ + - name: log + hostPath: + path: /var/log + - name: device-info + hostPath: + path: /var/run/k8s.cni.cncf.io/devinfo/dp + type: DirectoryOrCreate + - name: config-volume + configMap: + name: sriovdp-config + items: + - key: config.json + path: config.json + +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-sriov-device-plugin-ppc64le + namespace: kube-system + labels: + tier: node + app: sriovdp +spec: + selector: + matchLabels: + name: sriov-device-plugin + template: + metadata: + labels: + name: sriov-device-plugin + tier: node + app: sriovdp + spec: + hostNetwork: true + nodeSelector: + beta.kubernetes.io/arch: ppc64le + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + serviceAccountName: sriov-device-plugin + containers: + - name: kube-sriovdp + image: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:latest-ppc64le + imagePullPolicy: IfNotPresent + args: + - --log-dir=sriovdp + - --log-level=10 + securityContext: + privileged: true + resources: + requests: + cpu: "250m" + memory: "40Mi" + limits: + cpu: 1 + memory: "200Mi" + volumeMounts: + - name: devicesock + mountPath: /var/lib/kubelet/ + readOnly: false + - name: log + mountPath: /var/log + - name: config-volume + mountPath: /etc/pcidp + - name: device-info + mountPath: /var/run/k8s.cni.cncf.io/devinfo/dp + volumes: + - name: devicesock + hostPath: + path: /var/lib/kubelet/ + - name: log + hostPath: + path: /var/log + - name: device-info + hostPath: + path: /var/run/k8s.cni.cncf.io/devinfo/dp + type: DirectoryOrCreate + - name: config-volume + configMap: + name: sriovdp-config + items: + - key: config.json + path: config.json +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-sriov-device-plugin-arm64 + namespace: kube-system + labels: + tier: node + app: sriovdp +spec: + selector: + matchLabels: + name: sriov-device-plugin + template: + metadata: + labels: + name: sriov-device-plugin + tier: node + app: sriovdp + spec: + hostNetwork: true + nodeSelector: + beta.kubernetes.io/arch: arm64 + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + serviceAccountName: sriov-device-plugin + containers: + - name: kube-sriovdp + image: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:latest-arm64 + imagePullPolicy: IfNotPresent + args: + - --log-dir=sriovdp + - --log-level=10 + securityContext: + privileged: true + resources: + requests: + cpu: "250m" + memory: "40Mi" + limits: + cpu: 1 + memory: "200Mi" + volumeMounts: + - name: devicesock + mountPath: /var/lib/kubelet/ + readOnly: false + - name: log + mountPath: /var/log + - name: config-volume + mountPath: /etc/pcidp + - name: device-info + mountPath: /var/run/k8s.cni.cncf.io/devinfo/dp + volumes: + - name: devicesock + hostPath: + path: /var/lib/kubelet/ + - name: log + hostPath: + path: /var/log + - name: device-info + hostPath: + path: /var/run/k8s.cni.cncf.io/devinfo/dp + type: DirectoryOrCreate + - name: config-volume + configMap: + name: sriovdp-config + items: + - key: config.json + path: config.json diff --git a/cluster-up/cluster/kind-1.23-sriov/sriov-components/sriov_components.sh b/cluster-up/cluster/kind-1.23-sriov/sriov-components/sriov_components.sh new file mode 100644 index 000000000000..23d87aeee62d --- /dev/null +++ b/cluster-up/cluster/kind-1.23-sriov/sriov-components/sriov_components.sh @@ -0,0 +1,212 @@ +#!/bin/bash + 
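+# sriov_components.sh - helpers used by config_sriov_cluster.sh to render
+# (via kustomize) and deploy the Multus and SR-IOV CNI / device-plugin
+# manifests, and to wait for the deployed components to become ready.
+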
+MANIFESTS_DIR="${KUBEVIRTCI_PATH}/cluster/${KUBEVIRT_PROVIDER}/sriov-components/manifests"
+
+# CUSTOM_MANIFESTS must be defined before the manifest paths derived from it below
+CUSTOM_MANIFESTS="${KUBEVIRTCI_CONFIG_PATH}/${KUBEVIRT_PROVIDER}/manifests"
+SRIOV_COMPONENTS_MANIFEST="${CUSTOM_MANIFESTS}/sriov-components.yaml"
+
+KUSTOMIZE_MULTUS_DIR="${MANIFESTS_DIR}/multus"
+MULTUS_MANIFEST="${CUSTOM_MANIFESTS}/multus.yaml"
+
+SRIOV_DEVICE_PLUGIN_CONFIG_TEMPLATE="${MANIFESTS_DIR}/sriovdp-config.yaml.in"
+SRIOV_DEVICE_PLUGIN_CONFIG="${CUSTOM_MANIFESTS}/sriovdp-config.yaml"
+
+PATCH_SRIOVDP_RESOURCE_PREFIX_TEMPLATE="${MANIFESTS_DIR}/patch-sriovdp-resource-prefix.yaml.in"
+PATCH_SRIOVDP_RESOURCE_PREFIX="${CUSTOM_MANIFESTS}/patch-sriovdp-resource-prefix.yaml"
+
+PATCH_NODE_SELECTOR_TEMPLATE="${MANIFESTS_DIR}/patch-node-selector.yaml.in"
+PATCH_NODE_SELECTOR="${CUSTOM_MANIFESTS}/patch-node-selector.yaml"
+
+KUBECONFIG="${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubeconfig"
+KUBECTL="${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl --kubeconfig=${KUBECONFIG}"
+
+function _kubectl() {
+    ${KUBECTL} "$@"
+}
+
+function _retry() {
+    local -r tries=$1
+    local -r wait_time=$2
+    local -r action=$3
+    local -r wait_message=$4
+    local -r waiting_action=$5
+
+    eval $action
+    local return_code=$?
+    for i in $(seq $tries); do
+        if [[ $return_code -ne 0 ]]; then
+            echo "[$i/$tries] $wait_message"
+            eval $waiting_action
+            sleep $wait_time
+            eval $action
+            return_code=$?
+        else
+            return 0
+        fi
+    done
+
+    return 1
+}
+
+function _check_all_pods_ready() {
+    all_pods_ready_condition=$(_kubectl get pods -A --no-headers -o custom-columns=':.status.conditions[?(@.type == "Ready")].status')
+    if [ "$?" -eq 0 ]; then
+        pods_not_ready_count=$(grep -cw False <<<"$all_pods_ready_condition")
+        if [ "$pods_not_ready_count" -eq 0 ]; then
+            return 0
+        fi
+    fi
+
+    return 1
+}
+
+# Not using 'kubectl wait' since with the SR-IOV operator the pods get restarted
+# a couple of times; this is more reliable.
+function sriov_components::wait_pods_ready() {
+    local -r tries=30
+    local -r wait_time=10
+
+    local -r wait_message="Waiting for all pods to become ready..."
+    local -r error_message="Not all pods were ready after $(($tries * $wait_time)) seconds"
+
+    local -r get_pods='_kubectl get pods --all-namespaces'
+    local -r action="_check_all_pods_ready"
+
+    set +x
+    trap "set -x" RETURN
+
+    if ! _retry "$tries" "$wait_time" "$action" "$wait_message" "$get_pods"; then
+        echo $error_message
+        return 1
+    fi
+
+    echo "all pods are ready"
+    return 0
+}
+
+function sriov_components::wait_allocatable_resource() {
+    local -r node=$1
+    local resource_name=$2
+    local -r expected_value=$3
+
+    local -r tries=48
+    local -r wait_time=10
+
+    local -r wait_message="wait for $node node to have allocatable resource: $resource_name: $expected_value"
+    local -r error_message="node $node doesn't have allocatable resource $resource_name:$expected_value"
+
+    # it is necessary to add '\' before '.' in the resource name.
+    resource_name=$(echo $resource_name | sed s/\\./\\\\\./g)
+    local -r action='_kubectl get node $node -ocustom-columns=:.status.allocatable.$resource_name --no-headers | grep -w $expected_value'
+
+    if ! _retry $tries $wait_time "$action" "$wait_message"; then
+        echo $error_message
+        return 1
+    fi
+
+    return 0
+}
+
+function sriov_components::deploy_multus() {
+    _kubectl kustomize "$KUSTOMIZE_MULTUS_DIR" > "$MULTUS_MANIFEST"
+
+    echo "Deploying Multus:"
+    cat "$MULTUS_MANIFEST"
+
+    _kubectl apply -f "$MULTUS_MANIFEST"
+
+    return 0
+}
+
+function sriov_components::deploy() {
+    local -r pf_names=$1
+    local -r drivers=$2
+    local -r resource_prefix=$3
+    local -r resource_name=$4
+    local -r label_key=$5
+    local -r label_value=$6
+
+    _create_custom_manifests_dir
+    _prepare_node_selector_patch "$label_key" "$label_value"
+    _prepare_sriovdp_resource_prefix_patch "$resource_prefix"
+    _prepare_device_plugin_config \
+        "$pf_names" \
+        "$resource_name" \
+        "$drivers"
+    _deploy_sriov_components
+
+    return 0
+}
+
+function _create_custom_manifests_dir() {
+    mkdir -p "$CUSTOM_MANIFESTS"
+
+    cp -f $(find "$MANIFESTS_DIR"/*.yaml) "$CUSTOM_MANIFESTS"
+
+    return 0
+}
+
+function _prepare_node_selector_patch() {
+    local -r label_key=$1
+    local -r label_value=$2
+
+    (
+        export LABEL_KEY=$label_key
+        export LABEL_VALUE=$label_value
+        envsubst < "$PATCH_NODE_SELECTOR_TEMPLATE" > "$PATCH_NODE_SELECTOR"
+    )
+}
+
+function _prepare_sriovdp_resource_prefix_patch() {
+    local -r resource_prefix=$1
+
+    (
+        export RESOURCE_PREFIX=$resource_prefix
+        envsubst < "$PATCH_SRIOVDP_RESOURCE_PREFIX_TEMPLATE" > "$PATCH_SRIOVDP_RESOURCE_PREFIX"
+    )
+}
+
+function _prepare_device_plugin_config() {
+    local -r pf_names=$1
+    local -r resource_name=$2
+    local -r drivers=$3
+
+    (
+        export RESOURCE_NAME=$resource_name
+        export DRIVERS=$(_format_json_array "$drivers")
+        export PF_NAMES=$(_format_json_array "$pf_names")
+        envsubst < "$SRIOV_DEVICE_PLUGIN_CONFIG_TEMPLATE" > "$SRIOV_DEVICE_PLUGIN_CONFIG"
+    )
+
+    return 0
+}
+
+function _format_json_array() {
+    local -r string=$1
+
+    local json_array="$string"
+    # Replace all spaces with ",": aa bb -> aa","bb
+    local -r replace='","'
+    json_array="${json_array// /$replace}"
+
+    # Add opening quotes for the first element and closing quotes for the last element:
+    # aa","bb -> "aa","bb"
+    json_array="\"${json_array}\""
+
+    # Add brackets: "aa","bb" -> ["aa","bb"]
+    json_array="[${json_array}]"
+
+    echo "$json_array"
+}
+
+function _deploy_sriov_components() {
+    _kubectl kustomize "$CUSTOM_MANIFESTS" >"$SRIOV_COMPONENTS_MANIFEST"
+
+    echo "Deploying SRIOV components:"
+    cat "$SRIOV_COMPONENTS_MANIFEST"
+
+    _kubectl apply -f "$SRIOV_COMPONENTS_MANIFEST"
+
+    return 0
+}
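+
+# Illustrative examples of the helpers above (not executed from here):
+#   _format_json_array "vfio-pci netdevice"   # -> ["vfio-pci","netdevice"]
+# which fills the $DRIVERS / $PF_NAMES placeholders in sriovdp-config.yaml.in;
+#   _retry 30 10 "_check_all_pods_ready" "waiting for pods..." "_kubectl get pods -A"
+# which retries the readiness probe every 10s, up to 30 times, running the
+# watcher command between attempts.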
diff --git a/cluster-up/cluster/kind-1.23-sriov/sriov-node/configure_vfs.sh b/cluster-up/cluster/kind-1.23-sriov/sriov-node/configure_vfs.sh
new file mode 100755
index 000000000000..0312a9759116
--- /dev/null
+++ b/cluster-up/cluster/kind-1.23-sriov/sriov-node/configure_vfs.sh
@@ -0,0 +1,103 @@
+#!/bin/bash
+
+set -ex
+
+function configure_vf_driver() {
+    local -r vf_sys_device=$1
+    local -r driver=$2
+
+    vf_pci_address=$(basename $vf_sys_device)
+    # Check if the VF is bound to a different driver
+    if [ -d "$vf_sys_device/driver" ]; then
+        vf_bus_pci_device_driver=$(readlink -e $vf_sys_device/driver)
+        vf_driver_name=$(basename $vf_bus_pci_device_driver)
+
+        # Check if the VF is already configured with the supported driver
+        if [[ $vf_driver_name == $driver ]]; then
+            return
+        else
+            echo "Unbind VF $vf_pci_address from $vf_driver_name driver"
+            echo "$vf_pci_address" >> "$vf_bus_pci_device_driver/unbind"
+        fi
+    fi
+
+    echo "Bind VF $vf_pci_address to $driver driver"
+    echo "$driver" >> "$vf_sys_device/driver_override"
+    echo "$vf_pci_address" >> "/sys/bus/pci/drivers/$driver/bind"
+    echo "" >> "$vf_sys_device/driver_override"
+
+    return 0
+}
+
+function create_vfs() {
+    local -r pf_net_device=$1
+    local -r vfs_count=$2
+
+    local -r pf_name=$(basename $pf_net_device)
+    local -r pf_sys_device=$(readlink -e $pf_net_device)
+
+    local -r sriov_totalvfs_content=$(cat $pf_sys_device/sriov_totalvfs)
+    [ $sriov_totalvfs_content -lt $vfs_count ] && \
+        echo "FATAL: PF $pf_name supports up to $sriov_totalvfs_content VFs (sriov_totalvfs), requested $vfs_count" >&2 && return 1
+
+    echo "Creating $vfs_count VFs on PF $pf_name"
+    echo 0 >> "$pf_sys_device/sriov_numvfs"
+    echo "$vfs_count" >> "$pf_sys_device/sriov_numvfs"
+    sleep 3
+
+    return 0
+}
+
+function validate_run_with_sudo() {
+    [ "$(id -u)" -ne 0 ] && echo "FATAL: This script requires sudo privileges" >&2 && return 1
+
+    return 0
+}
+
+function validate_sysfs_mount_as_rw() {
+    local -r sysfs_permissions=$(grep -Po 'sysfs.*\K(ro|rw)' /proc/mounts)
+    [ "$sysfs_permissions" != rw ] && echo "FATAL: sysfs is read-only, try to remount as RW" >&2 && return 1
+
+    return 0
+}
+
+function ensure_driver_is_loaded() {
+    local -r driver_name=$1
+    local -r module_name=$2
+
+    if ! grep "$module_name" /proc/modules; then
+        if ! modprobe "$driver_name"; then
+            echo "FATAL: failed to load $driver_name kernel module $module_name" >&2 && return 1
+        fi
+    fi
+
+    return 0
+}
+
+DRIVER="${DRIVER:-vfio-pci}"
+DRIVER_KMODULE="${DRIVER_KMODULE:-vfio_pci}"
+VFS_COUNT=${VFS_COUNT:-6}
+
+[ $((VFS_COUNT)) -lt 1 ] && echo "INFO: VFS_COUNT is lower than 1, nothing to do..." && exit 0
+
+validate_run_with_sudo
+validate_sysfs_mount_as_rw
+ensure_driver_is_loaded $DRIVER $DRIVER_KMODULE
+
+sriov_pfs=( $(find /sys/class/net/*/device/sriov_numvfs) )
+[ "${#sriov_pfs[@]}" -eq 0 ] && echo "FATAL: Could not find available SR-IOV PFs" >&2 && exit 1
+
+# Iterate over all discovered PFs (an unquoted '$sriov_pfs' would only visit the first one)
+for pf_name in "${sriov_pfs[@]}"; do
+    pf_device=$(dirname "$pf_name")
+
+    echo "Creating VFs"
+    create_vfs "$pf_device" "$VFS_COUNT"
+
+    echo "Configuring VF drivers"
+    # /sys/class/net/<pf name>/device/virtfn*
+    vfs_sys_devices=$(readlink -e $pf_device/virtfn*)
+    for vf in $vfs_sys_devices; do
+        configure_vf_driver "$vf" $DRIVER
+        ls -l "$vf/driver"
+    done
+done
diff --git a/cluster-up/cluster/kind-1.23-sriov/sriov-node/node.sh b/cluster-up/cluster/kind-1.23-sriov/sriov-node/node.sh
new file mode 100644
index 000000000000..8d1a997c2481
--- /dev/null
+++ b/cluster-up/cluster/kind-1.23-sriov/sriov-node/node.sh
@@ -0,0 +1,124 @@
+#!/bin/bash
+
+SCRIPT_PATH=${SCRIPT_PATH:-$(dirname "$(realpath "$0")")}
+
+CONFIGURE_VFS_SCRIPT_PATH="${SCRIPT_PATH}/configure_vfs.sh"
+PFS_IN_USE=${PFS_IN_USE:-}
+
+function node::discover_host_pfs() {
+    local -r sriov_pfs=( $(find /sys/class/net/*/device/sriov_numvfs) )
+    [ "${#sriov_pfs[@]}" -eq 0 ] && echo "FATAL: Could not find available SR-IOV PFs on host" >&2 && return 1
+
+    local pf_name
+    local pfs_names=()
+    for pf in "${sriov_pfs[@]}"; do
+        pf_name="${pf%%/device/*}"
+        pf_name="${pf_name##*/}"
+        if [ $(echo "${PF_BLACKLIST[@]}" | grep "${pf_name}") ]; then
+            continue
+        fi
+
+        pfs_names+=( $pf_name )
+    done
+
+    echo "${pfs_names[@]}"
+}
+
+# node::configure_sriov_pfs moves SR-IOV PFs to the nodes' netns.
+# It writes the list of SR-IOV PFs that have been moved into the
+# variable named by its fourth argument (e.g. 'PFS_IN_USE').
+function node::configure_sriov_pfs() {
+    local -r nodes_array=($1)
+    local -r pfs_names_array=($2)
+    local -r pf_count_per_node=$3
+    local -r pfs_in_use_var_name=$4
+
+    local pfs_to_move=()
+    local pfs_array_offset=0
+    local pfs_in_use=()
+    local node_exec
+
+    # 'iplink' learns which network namespaces there are by checking /var/run/netns
+    mkdir -p /var/run/netns
+    for node in "${nodes_array[@]}"; do
+        prepare_node_netns "$node"
+
+        ## Move PFs to the node netns
+        # Slice '$pfs_names_array' so that each node gets a unique slice
+        # of '$pf_count_per_node' PF names
+        pfs_to_move=( "${pfs_names_array[@]:$pfs_array_offset:$pf_count_per_node}" )
+        echo "Moving '${pfs_to_move[*]}' PFs to '$node' netns"
+        for pf_name in "${pfs_to_move[@]}"; do
+            move_pf_to_node_netns "$node" "$pf_name"
+        done
+        # Increment the offset for the next slice
+        pfs_array_offset=$((pfs_array_offset + pf_count_per_node))
+        # Record all PFs moved to this node (not just the last one)
+        pfs_in_use+=( "${pfs_to_move[@]}" )
+
+        # KIND mounts sysfs as read-only by default, remount as R/W
+        node_exec="${CRI_BIN} exec $node"
+        $node_exec mount -o remount,rw /sys
+
+        ls_node_dev_vfio="${node_exec} ls -la -Z /dev/vfio"
+        $ls_node_dev_vfio
+        $node_exec chmod 0666 /dev/vfio/vfio
+        $ls_node_dev_vfio
+
+        _kubectl label node $node $SRIOV_NODE_LABEL
+    done
+
+    # Write the used PF names into the caller-supplied variable
+    eval $pfs_in_use_var_name="'${pfs_in_use[*]}'"
+}
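+
+# Illustrative calling convention (config_sriov_cluster.sh performs the real
+# call; the PF names here are made up):
+#   PFS_IN_USE=""
+#   node::configure_sriov_pfs "sriov-worker sriov-worker2" "ens1f0 ens1f1" 1 "PFS_IN_USE"
+#   echo "$PFS_IN_USE"   # -> "ens1f0 ens1f1"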
+
+# node::configure_sriov_vfs creates SR-IOV VFs and configures their driver on each node.
+function node::configure_sriov_vfs() {
+    local -r nodes_array=($1)
+    local -r driver=$2
+    local -r driver_kmodule=$3
+    local -r vfs_count=$4
+
+    local -r config_vf_script=$(basename "$CONFIGURE_VFS_SCRIPT_PATH")
+
+    for node in "${nodes_array[@]}"; do
+        ${CRI_BIN} cp "$CONFIGURE_VFS_SCRIPT_PATH" "$node:/"
+        ${CRI_BIN} exec "$node" bash -c "DRIVER=$driver DRIVER_KMODULE=$driver_kmodule VFS_COUNT=$vfs_count ./$config_vf_script"
+        ${CRI_BIN} exec "$node" ls -la -Z /dev/vfio
+    done
+}
+
+function prepare_node_netns() {
+    local -r node_name=$1
+    local -r node_pid=$($CRI_BIN inspect -f '{{.State.Pid}}' "$node_name")
+
+    # Docker does not create the required symlink for a container netns,
+    # which prevents iplink from learning about that container netns.
+    # Thus it is necessary to create a symlink between the current
+    # worker node (container) netns and /var/run/netns (consumed by iplink).
+    # After this, the node container netns is visible under the node's name.
+    ln -sf "/proc/$node_pid/ns/net" "/var/run/netns/$node_name"
+}
+
+function move_pf_to_node_netns() {
+    local -r node_name=$1
+    local -r pf_name=$2
+
+    # Move PF to the node network-namespace
+    ip link set "$pf_name" netns "$node_name"
+    # Ensure the moved PF is up
+    ip netns exec "$node_name" ip link set up dev "$pf_name"
+    ip netns exec "$node_name" ip link show
+}
+
+function node::total_vfs_count() {
+    local -r node_name=$1
+    local -r node_pid=$($CRI_BIN inspect -f '{{.State.Pid}}' "$node_name")
+    local -r pfs_sriov_numvfs=( $(cat /proc/$node_pid/root/sys/class/net/*/device/sriov_numvfs) )
+    local total_vfs_on_node=0
+
+    for num_vfs in "${pfs_sriov_numvfs[@]}"; do
+        total_vfs_on_node=$((total_vfs_on_node + num_vfs))
+    done
+
+    echo "$total_vfs_on_node"
+}
diff --git a/cluster-up/cluster/kind-1.23-sriov/sriovdp_setup.sh b/cluster-up/cluster/kind-1.23-sriov/sriovdp_setup.sh
new file mode 100755
index 000000000000..2eed8318f248
--- /dev/null
+++ b/cluster-up/cluster/kind-1.23-sriov/sriovdp_setup.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+get_sriov_pci_root_addresses() {
+    for dir in $(find /sys/devices/ -name sriov_totalvfs -exec dirname {} \;); do
+        if [ $(cat $dir/sriov_numvfs) -gt 0 ]; then
+            # use perl because sed doesn't support non-greedy matching
+            basename $dir | perl -pe 's|(.*?:)(.*)|\2|'
+        fi
+    done
+}
+
+create_pci_string() {
+    local quoted_values=($(echo "${pci_addresses[@]}" | xargs printf "\"%s\" " ))
+    local quoted_as_string=${quoted_values[@]}
+    if [ "$quoted_as_string" = "\"\"" ]; then
+        pci_string=""
+    else
+        pci_string=${quoted_as_string// /, }
+    fi
+}
+
+sriov_device_plugin() {
+    pci_addresses=$(get_sriov_pci_root_addresses)
+    create_pci_string
+
+    cat <<EOF > /etc/pcidp/config.json
+{
+    "resourceList":
+    [
+        {
+            "resourceName": "sriov",
+            "rootDevices": [$pci_string],
+            "sriovMode": true,
+            "deviceType": "vfio"
+        }
+    ]
+}
+EOF
+}
+
+mkdir -p /etc/pcidp
+sriov_device_plugin
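+
+# Illustrative result: with a single PF at PCI address 0000:3b:00.0 exposing
+# active VFs, get_sriov_pci_root_addresses yields "3b:00.0" (the non-greedy
+# perl substitution strips the PCI domain prefix), so the generated
+# /etc/pcidp/config.json ends up with: "rootDevices": ["3b:00.0"]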
diff --git a/cluster-up/hack/common.sh b/cluster-up/hack/common.sh
index 7411d61276cb..887196111ded 100644
--- a/cluster-up/hack/common.sh
+++ b/cluster-up/hack/common.sh
@@ -17,7 +17,7 @@ fi
 
 KUBEVIRTCI_CLUSTER_PATH=${KUBEVIRTCI_CLUSTER_PATH:-${KUBEVIRTCI_PATH}/cluster}
 
-KUBEVIRT_PROVIDER=${KUBEVIRT_PROVIDER:-k8s-1.22}
+KUBEVIRT_PROVIDER=${KUBEVIRT_PROVIDER:-k8s-1.25}
 KUBEVIRT_NUM_NODES=${KUBEVIRT_NUM_NODES:-1}
 KUBEVIRT_MEMORY_SIZE=${KUBEVIRT_MEMORY_SIZE:-5120M}
 KUBEVIRT_NUM_SECONDARY_NICS=${KUBEVIRT_NUM_SECONDARY_NICS:-0}
@@ -43,4 +43,4 @@ provider_prefix=${JOB_NAME:-${KUBEVIRT_PROVIDER}}${EXECUTOR_NUMBER}
 job_prefix=${JOB_NAME:-kubevirt}${EXECUTOR_NUMBER}
 
 mkdir -p $KUBEVIRTCI_CONFIG_PATH/$KUBEVIRT_PROVIDER
-KUBEVIRTCI_TAG=2210211528-cd36fcc
+KUBEVIRTCI_TAG=2211212125-021efaa
diff --git a/cluster-up/version.txt b/cluster-up/version.txt
index f69aeb24f1d1..99609e7512f1 100644
--- a/cluster-up/version.txt
+++ b/cluster-up/version.txt
@@ -1 +1 @@
-2210211528-cd36fcc
+2211212125-021efaa
diff --git a/hack/config-default.sh b/hack/config-default.sh
index bd2a29889f4c..55d07133140e 100644
--- a/hack/config-default.sh
+++ b/hack/config-default.sh
@@ -11,7 +11,7 @@ cdi_namespace=cdi
 image_pull_policy=${IMAGE_PULL_POLICY:-IfNotPresent}
 verbosity=${VERBOSITY:-2}
 package_name=${PACKAGE_NAME:-kubevirt-dev}
-kubevirtci_git_hash="2210211528-cd36fcc"
+kubevirtci_git_hash="2211212125-021efaa"
 conn_check_ipv4_address=${CONN_CHECK_IPV4_ADDRESS:-""}
 conn_check_ipv6_address=${CONN_CHECK_IPV6_ADDRESS:-""}
 conn_check_dns=${CONN_CHECK_DNS:-""}