diff --git a/CHANGELOG.md b/CHANGELOG.md
index efd7349f24..f2a4d488f7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -66,6 +66,11 @@
   nodes. This failover is not managed by MetalK8s.
   (PR[#3415](https://github.com/scality/metalk8s/pull/3415))
 
+- [#2381](https://github.com/scality/metalk8s/issues/2381) - Allow
+  MetalK8s to manage the Control Plane Ingress Virtual IP using MetalLB,
+  when the user environment allows it
+  (PR[#3418](https://github.com/scality/metalk8s/pull/3418))
+
 ### Breaking changes
 
 - [#2199](https://github.com/scality/metalk8s/issues/2199) - Prometheus label
diff --git a/buildchain/buildchain/constants.py b/buildchain/buildchain/constants.py
index 59b376436b..cd8db74f0a 100644
--- a/buildchain/buildchain/constants.py
+++ b/buildchain/buildchain/constants.py
@@ -17,6 +17,7 @@
 CMD_WIDTH: int = 14
 
 # URLs of the main container repositories.
+BITNAMI_REPOSITORY: str = "docker.io/bitnami"
 CALICO_REPOSITORY: str = "docker.io/calico"
 COREDNS_REPOSITORY: str = "k8s.gcr.io/coredns"
 COREOS_REPOSITORY: str = "quay.io/coreos"
diff --git a/buildchain/buildchain/image.py b/buildchain/buildchain/image.py
index 756f6e05d6..a1429e1407 100644
--- a/buildchain/buildchain/image.py
+++ b/buildchain/buildchain/image.py
@@ -174,6 +174,7 @@ def _operator_image(name: str, **kwargs: Any) -> targets.OperatorImage:
 TO_PULL: List[targets.RemoteImage] = []
 
 IMGS_PER_REPOSITORY: Dict[str, List[str]] = {
+    constants.BITNAMI_REPOSITORY: ["metallb-controller", "metallb-speaker"],
     constants.CALICO_REPOSITORY: [
         "calico-node",
         "calico-kube-controllers",
diff --git a/buildchain/buildchain/salt_tree.py b/buildchain/buildchain/salt_tree.py
index cd3f3e8365..86ab3eea62 100644
--- a/buildchain/buildchain/salt_tree.py
+++ b/buildchain/buildchain/salt_tree.py
@@ -429,7 +429,15 @@ def _get_parts(self) -> Iterator[str]:
         "salt/metalk8s/addons/nginx-ingress-control-plane/deployed/",
         "chart-daemonset.sls",
     ),
+    Path(
+        "salt/metalk8s/addons/nginx-ingress-control-plane/deployed/",
+        "chart-deployment.sls",
+    ),
     Path("salt/metalk8s/addons/nginx-ingress-control-plane/deployed/tls-secret.sls"),
+    Path("salt/metalk8s/addons/metallb/deployed/chart.sls"),
+    Path("salt/metalk8s/addons/metallb/deployed/config.sls"),
+    Path("salt/metalk8s/addons/metallb/deployed/init.sls"),
+    Path("salt/metalk8s/addons/metallb/deployed/namespace.sls"),
     Path("salt/metalk8s/beacon/certificates.sls"),
     Path("salt/metalk8s/container-engine/containerd/configured.sls"),
     Path("salt/metalk8s/container-engine/containerd/files/50-metalk8s.conf.j2"),
diff --git a/buildchain/buildchain/versions.py b/buildchain/buildchain/versions.py
index 7e6a80e258..300835f9d3 100644
--- a/buildchain/buildchain/versions.py
+++ b/buildchain/buildchain/versions.py
@@ -175,6 +175,16 @@ def _version_prefix(version: str, prefix: str = "v") -> str:
         version="v1.1.2",
         digest="sha256:22fbde17ab647ddf89841e5e464464eece111402b7d599882c2a3393bc0d2810",
     ),
+    Image(
+        name="metallb-controller",
+        version="0.9.6-debian-10-r52",
+        digest="sha256:a493e311beb663c7282fe6c3712899ab6fb7bd0ad4a38388ea1b97d7d735ff8a",
+    ),
+    Image(
+        name="metallb-speaker",
+        version="0.9.6-debian-10-r54",
+        digest="sha256:27f94679069f184d9bc67f6d5eccfc961be7588a73a066a8a4f87b6b3ef75614",
+    ),
     Image(
         name="pause",
         version="3.2",
diff --git a/charts/ingress-nginx-control-plane-deployment.yaml b/charts/ingress-nginx-control-plane-deployment.yaml
new file mode 100644
index 0000000000..2e464c0a73
--- /dev/null
+++ b/charts/ingress-nginx-control-plane-deployment.yaml
@@ -0,0 +1,76 @@
+controller: + image: + digest: null + repository: '__image__(nginx-ingress-controller)' + + defaultBackendService: 'metalk8s-ingress/nginx-ingress-default-backend' + + electionID: ingress-control-plane-controller-leader + + ingressClass: nginx-control-plane + + admissionWebhooks: + enabled: false + + kind: Deployment + + replicaCount: 2 + + minAvailable: 0 + + updateStrategy: + type: RollingUpdate + + tolerations: + - key: "node-role.kubernetes.io/bootstrap" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/infra" + operator: "Exists" + effect: "NoSchedule" + + nodeSelector: + node-role.kubernetes.io/master: '' + + service: + loadBalancerIP: '__var__(salt.metalk8s_network.get_control_plane_ingress_ip())' + externalTrafficPolicy: Local + + enableHttp: false + + ports: + https: 8443 + + extraArgs: + default-ssl-certificate: "metalk8s-ingress/ingress-control-plane-default-certificate" + metrics-per-host: false + + metrics: + enabled: true + serviceMonitor: + enabled: true + additionalLabels: + metalk8s.scality.com/monitor: '' + +defaultBackend: + enabled: true + + image: + repository: '__image__(nginx-ingress-defaultbackend-amd64)' + + tolerations: + - key: "node-role.kubernetes.io/bootstrap" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/infra" + operator: "Exists" + effect: "NoSchedule" + + nodeSelector: + node-role.kubernetes.io/master: '' diff --git a/charts/metallb.yaml b/charts/metallb.yaml new file mode 100644 index 0000000000..b1995b776d --- /dev/null +++ b/charts/metallb.yaml @@ -0,0 +1,42 @@ +existingConfigMap: metallb-config + +controller: + image: + registry: null + repository: '__image__(metallb-controller)' + + nodeSelector: + node-role.kubernetes.io/master: '' + + tolerations: + - key: "node-role.kubernetes.io/bootstrap" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/infra" + operator: "Exists" + effect: "NoSchedule" + + podAnnotations: + checksum/config: '__slot__:salt:metalk8s_kubernetes.get_object_digest(kind="ConfigMap", apiVersion="v1", namespace="metalk8s-loadbalancing", name="metallb-config", path="data:config")' + +speaker: + image: + registry: null + repository: '__image__(metallb-speaker)' + + nodeSelector: + node-role.kubernetes.io/master: '' + + tolerations: + - key: "node-role.kubernetes.io/bootstrap" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/infra" + operator: "Exists" + effect: "NoSchedule" diff --git a/docs/installation/bootstrap.rst b/docs/installation/bootstrap.rst index a2bb7ef335..c3c7b1ac10 100644 --- a/docs/installation/bootstrap.rst +++ b/docs/installation/bootstrap.rst @@ -49,6 +49,8 @@ Configuration cidr: ingress: ip: + metalLB: + enabled: workloadPlane: cidr: mtu: @@ -92,6 +94,16 @@ notation for it's various subfields. that if you lose the Bootstrap node, you no longer have access to any control plane component). 
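+   For illustration, a ``controlPlane`` section could look like the
+   following sketch (the addresses are placeholders to adapt to your
+   environment; the ``metalLB`` option is described just below)::
+
+     networks:
+       controlPlane:
+         cidr: 10.100.0.0/24
+         metalLB:
+           enabled: true
+         ingress:
+           ip: 10.100.0.254
+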
+   This ``ip`` for ``ingress`` can be managed by MetalK8s directly if
+   your environment allows it. To do so, MetalK8s relies on
+   `MetalLB <https://metallb.universe.tf>`_, which manages this
+   Virtual IP at Layer 2, using only
+   `ARP <https://en.wikipedia.org/wiki/Address_Resolution_Protocol>`_
+   requests. To use MetalLB, your network must properly broadcast
+   ARP requests, so that the Control Plane node hosting the
+   Virtual IP can answer them.
+   When MetalLB is enabled, this ingress ``ip`` is mandatory.
+
    For ``workloadPlane`` entry an
    `MTU <https://en.wikipedia.org/wiki/Maximum_transmission_unit>`_ can
    also be provided, this MTU value should be the lowest MTU value accross
diff --git a/docs/operation/changing_control_plane_ingress_ip.rst b/docs/operation/changing_control_plane_ingress_ip.rst
index 0a586272f8..32457ee0c7 100644
--- a/docs/operation/changing_control_plane_ingress_ip.rst
+++ b/docs/operation/changing_control_plane_ingress_ip.rst
@@ -1,6 +1,15 @@
 Changing the Control Plane Ingress IP
 =====================================
 
+This procedure describes how to change the Control Plane Ingress IP,
+and how to enable (or disable) MetalLB management of this IP.
+
+.. note::
+
+   Disabling MetalLB using this procedure does **not** remove MetalLB;
+   it simply stops using it to manage the ``LoadBalancer`` *Service*
+   for the MetalK8s Control Plane Ingress.
+
 #. On the Bootstrap node, update the ``ip`` field from
    ``networks.controlPlane.ingress`` in the Bootstrap configuration file.
    (refer to :ref:`Bootstrap Configuration`)
@@ -18,6 +27,16 @@ Changing the Control Plane Ingress IP
        $ salt-call metalk8s_network.get_control_plane_ingress_ip
        local:
 
+       $ salt-call pillar.get networks:control_plane
+       local:
+           ----------
+           cidr:
+               -
+           ingress:
+               ip:
+
+           metalLB:
+               enabled:
 
 #. On the Bootstrap node, reconfigure apiServer:
diff --git a/eve/main.yml b/eve/main.yml
index ab4297e163..0c4493e802 100644
--- a/eve/main.yml
+++ b/eve/main.yml
@@ -356,6 +356,10 @@ models:
           networks:
             controlPlane:
               cidr: 192.168.1.0/24
+              metalLB:
+                enabled: true
+              ingress:
+                ip: 192.168.1.254
             workloadPlane:
               cidr: 192.168.2.0/24
           ca:
@@ -442,6 +446,7 @@ models:
           TEST_HOSTS_LIST: "bootstrap"
           PYTEST_FILTERS: "post and ci"
           BOOTSTRAP_BACKUP_ARCHIVE: ""
+          CONTROL_PLANE_INGRESS_VIP: "192.168.1.253"
         command: >
           ssh -F ssh_config bastion --
           "cd metalk8s &&
@@ -480,7 +485,7 @@ models:
         name: Run UI tests on Bastion
         env: &_env_bastion_ui_tests
           TEST_FILTER: "e2e"
-          TARGET_URL: "https://%(prop:bootstrap_control_plane_ip)s:8443"
+          TARGET_URL: "https://192.168.1.254:8443"
         command: >
           ssh -F ssh_config bastion --
           "cd metalk8s/ui &&
diff --git a/salt/_pillar/metalk8s.py b/salt/_pillar/metalk8s.py
index cce2edb1f7..41100a806e 100644
--- a/salt/_pillar/metalk8s.py
+++ b/salt/_pillar/metalk8s.py
@@ -1,6 +1,7 @@
 import logging
 from collections import Mapping
 
+import salt.utils.dictupdate
 import salt.utils.files
 import salt.utils.yaml
 
@@ -90,6 +91,17 @@ def _load_networks(config_data):
         if not isinstance(networks_data[net]["cidr"], list):
             networks_data[net]["cidr"] = [networks_data[net]["cidr"]]
 
+    # MetalLB disabled by default
+    networks_data["controlPlane"].setdefault("metalLB", {}).setdefault("enabled", False)
+
+    if networks_data["controlPlane"]["metalLB"]["enabled"] and not networks_data[
+        "controlPlane"
+    ].get("ingress", {}).get("ip"):
+        errors.append(
+            "'ip' for 'ingress' in 'controlPlane' network is mandatory when "
+            "'metalLB' is enabled"
+        )
+
     if errors:
         return __utils__["pillar_utils.errors_to_dict"](errors)
 
diff --git a/salt/metalk8s/addons/metallb/deployed/chart.sls b/salt/metalk8s/addons/metallb/deployed/chart.sls
new file mode 100644
index
0000000000..af0e21d78c --- /dev/null +++ b/salt/metalk8s/addons/metallb/deployed/chart.sls @@ -0,0 +1,485 @@ +#!jinja | metalk8s_kubernetes + +{%- from "metalk8s/repo/macro.sls" import build_image_name with context %} + + + +{% raw %} + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: metallb + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: metallb + app.kubernetes.io/part-of: metalk8s + helm.sh/chart: metallb-2.4.0 + heritage: metalk8s + name: metallb-controller + namespace: metalk8s-loadbalancing +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: speaker + app.kubernetes.io/instance: metallb + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: metallb + app.kubernetes.io/part-of: metalk8s + helm.sh/chart: metallb-2.4.0 + heritage: metalk8s + name: metallb-speaker + namespace: metalk8s-loadbalancing +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: metallb + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: metallb + app.kubernetes.io/part-of: metalk8s + helm.sh/chart: metallb-2.4.0 + heritage: metalk8s + name: metallb-controller + namespace: metalk8s-loadbalancing +rules: +- apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - watch + - update +- apiGroups: + - '' + resources: + - services/status + verbs: + - update +- apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resourceNames: + - metallb-controller + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: speaker + app.kubernetes.io/instance: metallb + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: metallb + app.kubernetes.io/part-of: metalk8s + helm.sh/chart: metallb-2.4.0 + heritage: metalk8s + name: metallb-speaker + namespace: metalk8s-loadbalancing +rules: +- apiGroups: + - '' + resources: + - services + - endpoints + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resourceNames: + - metallb-speaker + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: metallb + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: metallb + app.kubernetes.io/part-of: metalk8s + helm.sh/chart: metallb-2.4.0 + heritage: metalk8s + name: metallb-controller + namespace: metalk8s-loadbalancing +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metallb-controller +subjects: +- kind: ServiceAccount + name: metallb-controller + namespace: metalk8s-loadbalancing +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: speaker + app.kubernetes.io/instance: metallb + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: metallb + app.kubernetes.io/part-of: metalk8s + helm.sh/chart: metallb-2.4.0 + heritage: metalk8s + name: metallb-speaker + namespace: metalk8s-loadbalancing +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metallb-speaker +subjects: +- kind: ServiceAccount + name: metallb-speaker + 
namespace: metalk8s-loadbalancing +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: metallb + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: metallb + app.kubernetes.io/part-of: metalk8s + helm.sh/chart: metallb-2.4.0 + heritage: metalk8s + name: metallb-config-watcher + namespace: metalk8s-loadbalancing +rules: +- apiGroups: + - '' + resources: + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: speaker + app.kubernetes.io/instance: metallb + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: metallb + app.kubernetes.io/part-of: metalk8s + helm.sh/chart: metallb-2.4.0 + heritage: metalk8s + name: metallb-pod-lister + namespace: metalk8s-loadbalancing +rules: +- apiGroups: + - '' + resources: + - pods + verbs: + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/instance: metallb + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: metallb + app.kubernetes.io/part-of: metalk8s + helm.sh/chart: metallb-2.4.0 + heritage: metalk8s + name: metallb-config-watcher + namespace: metalk8s-loadbalancing +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: metallb-config-watcher +subjects: +- kind: ServiceAccount + name: metallb-controller +- kind: ServiceAccount + name: metallb-speaker +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: speaker + app.kubernetes.io/instance: metallb + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: metallb + app.kubernetes.io/part-of: metalk8s + helm.sh/chart: metallb-2.4.0 + heritage: metalk8s + name: metallb-pod-lister + namespace: metalk8s-loadbalancing +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: metallb-pod-lister +subjects: +- kind: ServiceAccount + name: metallb-speaker +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app.kubernetes.io/component: speaker + app.kubernetes.io/instance: metallb + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: metallb + app.kubernetes.io/part-of: metalk8s + helm.sh/chart: metallb-2.4.0 + heritage: metalk8s + name: metallb-speaker + namespace: metalk8s-loadbalancing +spec: + selector: + matchLabels: + app.kubernetes.io/component: speaker + app.kubernetes.io/instance: metallb + app.kubernetes.io/name: metallb + template: + metadata: + labels: + app.kubernetes.io/component: speaker + app.kubernetes.io/instance: metallb + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: metallb + app.kubernetes.io/part-of: metalk8s + helm.sh/chart: metallb-2.4.0 + heritage: metalk8s + spec: + containers: + - args: + - --port=7472 + - --config=metallb-config + env: + - name: METALLB_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: METALLB_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: METALLB_ML_BIND_ADDR + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: METALLB_ML_LABELS + value: app.kubernetes.io/name=metallb,app.kubernetes.io/instance=metallb,app.kubernetes.io/component=speaker + - name: METALLB_ML_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: METALLB_ML_SECRET_KEY + valueFrom: + secretKeyRef: + key: secretkey + name: metallb-memberlist + image: {% endraw -%}{{ build_image_name("metallb-speaker", False) }}{%- raw %}:0.9.6-debian-10-r54 + 
imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + name: metallb-speaker + ports: + - containerPort: 7472 + name: metrics + readinessProbe: + failureThreshold: 3 + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + - SYS_ADMIN + drop: + - ALL + readOnlyRootFilesystem: true + runAsUser: 0 + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + node-role.kubernetes.io/master: '' + serviceAccountName: metallb-speaker + terminationGracePeriodSeconds: 2 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/bootstrap + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: metallb + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: metallb + app.kubernetes.io/part-of: metalk8s + helm.sh/chart: metallb-2.4.0 + heritage: metalk8s + name: metallb-controller + namespace: metalk8s-loadbalancing +spec: + revisionHistoryLimit: 3 + selector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: metallb + app.kubernetes.io/name: metallb + template: + metadata: + annotations: + checksum/config: __slot__:salt:metalk8s_kubernetes.get_object_digest(kind="ConfigMap", + apiVersion="v1", namespace="metalk8s-loadbalancing", name="metallb-config", + path="data:config") + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: metallb + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: metallb + app.kubernetes.io/part-of: metalk8s + helm.sh/chart: metallb-2.4.0 + heritage: metalk8s + spec: + affinity: + nodeAffinity: null + podAffinity: null + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: metallb + app.kubernetes.io/name: metallb + namespaces: + - metalk8s-loadbalancing + topologyKey: kubernetes.io/hostname + weight: 1 + containers: + - args: + - --port=7472 + - --config=metallb-config + image: {% endraw -%}{{ build_image_name("metallb-controller", False) }}{%- raw %}:0.9.6-debian-10-r52 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + name: metallb-controller + ports: + - containerPort: 7472 + name: metrics + readinessProbe: + failureThreshold: 3 + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + nodeSelector: + kubernetes.io/os: linux + node-role.kubernetes.io/master: '' + securityContext: + fsGroup: 1001 + runAsNonRoot: true + runAsUser: 1001 + serviceAccountName: metallb-controller + terminationGracePeriodSeconds: 0 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/bootstrap + operator: Exists + - effect: NoSchedule + 
key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists +--- +apiVersion: v1 +data: + secretkey: UWtTV05ubjNTbTI4RjZSeUVhUXhGOUdUcWVPSDZMT2puVXRyTVFGZ0ZzcktDd3U4U2NzWWVlWW1vMTMzaUoyclVyMlV6NDk0ZXJId2VMeFRIRmpGVW85aGxEWExiV284VWlrRVA1WXlZa1lHN2tVUEFkM0xqZXpZcWRIOHlTYnlQWVNlRGlTRGJqczl3aEZVYjBueUZ6eldTZElUSGxlck1ZdnF1VUM4MURIUDVGVW9qa3RWSGVYNXJmYWo2cTlpejlzYWowdExWaDhWTEtRYjk0WXdhSk9ubnFwVzhrekVyeVJDWlE3Tm15V2U2b0xjc2g4Yno0eXFCc1B4WUlzeA== +kind: Secret +metadata: + annotations: + helm.sh/hook: pre-install + helm.sh/hook-delete-policy: before-hook-creation + labels: + app.kubernetes.io/component: speaker + app.kubernetes.io/instance: metallb + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: metallb + app.kubernetes.io/part-of: metalk8s + helm.sh/chart: metallb-2.4.0 + heritage: metalk8s + name: metallb-memberlist + namespace: metalk8s-loadbalancing + +{% endraw %} diff --git a/salt/metalk8s/addons/metallb/deployed/config.sls b/salt/metalk8s/addons/metallb/deployed/config.sls new file mode 100644 index 0000000000..db1c72cf28 --- /dev/null +++ b/salt/metalk8s/addons/metallb/deployed/config.sls @@ -0,0 +1,19 @@ +include: + - .namespace + +Create MetalLB ConfigMap: + metalk8s_kubernetes.object_present: + - manifest: + apiVersion: v1 + kind: ConfigMap + metadata: + name: metallb-config + namespace: metalk8s-loadbalancing + data: + config: | + address-pools: + - name: control-plane-ingress-ip + protocol: layer2 + addresses: + - {{ salt.metalk8s_network.get_control_plane_ingress_ip() }}/32 + auto-assign: false diff --git a/salt/metalk8s/addons/metallb/deployed/init.sls b/salt/metalk8s/addons/metallb/deployed/init.sls new file mode 100644 index 0000000000..aaa980a0f5 --- /dev/null +++ b/salt/metalk8s/addons/metallb/deployed/init.sls @@ -0,0 +1,4 @@ +include: + - .namespace + - .config + - .chart diff --git a/salt/metalk8s/addons/metallb/deployed/namespace.sls b/salt/metalk8s/addons/metallb/deployed/namespace.sls new file mode 100644 index 0000000000..7f1a349483 --- /dev/null +++ b/salt/metalk8s/addons/metallb/deployed/namespace.sls @@ -0,0 +1,10 @@ +#! 
metalk8s_kubernetes + +apiVersion: v1 +kind: Namespace +metadata: + name: metalk8s-loadbalancing + labels: + app.kubernetes.io/managed-by: metalk8s + app.kubernetes.io/part-of: metalk8s + heritage: metalk8s diff --git a/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/chart-deployment.sls b/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/chart-deployment.sls new file mode 100644 index 0000000000..25c03bdce9 --- /dev/null +++ b/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/chart-deployment.sls @@ -0,0 +1,590 @@ +#!jinja | metalk8s_kubernetes + +{%- from "metalk8s/repo/macro.sls" import build_image_name with context %} + + + +{% raw %} + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: 0.46.0 + helm.sh/chart: ingress-nginx-3.30.0 + heritage: metalk8s + name: ingress-nginx-control-plane-controller + namespace: metalk8s-ingress +spec: + minAvailable: 0 + selector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/name: ingress-nginx +--- +apiVersion: v1 +automountServiceAccountToken: true +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: 0.46.0 + helm.sh/chart: ingress-nginx-3.30.0 + heritage: metalk8s + name: ingress-nginx-control-plane + namespace: metalk8s-ingress +--- +apiVersion: v1 +automountServiceAccountToken: true +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: default-backend + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: 0.46.0 + helm.sh/chart: ingress-nginx-3.30.0 + heritage: metalk8s + name: ingress-nginx-control-plane-backend + namespace: metalk8s-ingress +--- +apiVersion: v1 +data: null +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: 0.46.0 + helm.sh/chart: ingress-nginx-3.30.0 + heritage: metalk8s + name: ingress-nginx-control-plane-controller + namespace: metalk8s-ingress +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: 0.46.0 + helm.sh/chart: ingress-nginx-3.30.0 + heritage: metalk8s + name: ingress-nginx-control-plane + namespace: metalk8s-ingress +rules: +- apiGroups: + - '' + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch +- apiGroups: + - '' + resources: + - nodes + verbs: + - get +- apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - extensions + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + 
- watch +- apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +- apiGroups: + - extensions + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update +- apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: 0.46.0 + helm.sh/chart: ingress-nginx-3.30.0 + heritage: metalk8s + name: ingress-nginx-control-plane + namespace: metalk8s-ingress +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx-control-plane +subjects: +- kind: ServiceAccount + name: ingress-nginx-control-plane + namespace: metalk8s-ingress +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: 0.46.0 + helm.sh/chart: ingress-nginx-3.30.0 + heritage: metalk8s + name: ingress-nginx-control-plane + namespace: metalk8s-ingress +rules: +- apiGroups: + - '' + resources: + - namespaces + verbs: + - get +- apiGroups: + - '' + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - extensions + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - extensions + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update +- apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +- apiGroups: + - '' + resourceNames: + - ingress-control-plane-controller-leader-nginx-control-plane + resources: + - configmaps + verbs: + - get + - update +- apiGroups: + - '' + resources: + - configmaps + verbs: + - create +- apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: 0.46.0 + helm.sh/chart: ingress-nginx-3.30.0 + heritage: metalk8s + name: ingress-nginx-control-plane + namespace: metalk8s-ingress +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx-control-plane +subjects: +- kind: ServiceAccount + name: ingress-nginx-control-plane + namespace: metalk8s-ingress +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: 0.46.0 + helm.sh/chart: ingress-nginx-3.30.0 + heritage: metalk8s + name: ingress-nginx-control-plane-controller-metrics + namespace: metalk8s-ingress +spec: + ports: + - name: metrics + port: 10254 + targetPort: metrics + selector: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: 
ingress-nginx-control-plane + app.kubernetes.io/name: ingress-nginx + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + annotations: null + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: 0.46.0 + helm.sh/chart: ingress-nginx-3.30.0 + heritage: metalk8s + name: ingress-nginx-control-plane-controller + namespace: metalk8s-ingress +spec: + externalTrafficPolicy: Local + loadBalancerIP: {% endraw -%}{{ salt.metalk8s_network.get_control_plane_ingress_ip() }}{%- raw %} + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https + selector: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/name: ingress-nginx + type: LoadBalancer +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: default-backend + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: 0.46.0 + helm.sh/chart: ingress-nginx-3.30.0 + heritage: metalk8s + name: ingress-nginx-control-plane-defaultbackend + namespace: metalk8s-ingress +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + selector: + app.kubernetes.io/component: default-backend + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/name: ingress-nginx + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: 0.46.0 + helm.sh/chart: ingress-nginx-3.30.0 + heritage: metalk8s + name: ingress-nginx-control-plane-controller + namespace: metalk8s-ingress +spec: + minReadySeconds: 0 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/name: ingress-nginx + strategy: + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/name: ingress-nginx + spec: + containers: + - args: + - /nginx-ingress-controller + - --default-backend-service=$(POD_NAMESPACE)/ingress-nginx-control-plane-defaultbackend + - --publish-service=$(POD_NAMESPACE)/ingress-nginx-control-plane-controller + - --election-id=ingress-control-plane-controller-leader + - --ingress-class=nginx-control-plane + - --configmap=$(POD_NAMESPACE)/ingress-nginx-control-plane-controller + - --default-ssl-certificate=metalk8s-ingress/ingress-control-plane-default-certificate + - --metrics-per-host=false + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + image: {% endraw -%}{{ build_image_name("nginx-ingress-controller", False) }}{%- raw %}:v0.46.0 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + 
scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + name: controller + ports: + - containerPort: 80 + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + - containerPort: 10254 + name: metrics + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + requests: + cpu: 100m + memory: 90Mi + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + runAsUser: 101 + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + node-role.kubernetes.io/master: '' + serviceAccountName: ingress-nginx-control-plane + terminationGracePeriodSeconds: 300 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/bootstrap + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: default-backend + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: 0.46.0 + helm.sh/chart: ingress-nginx-3.30.0 + heritage: metalk8s + name: ingress-nginx-control-plane-defaultbackend + namespace: metalk8s-ingress +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: default-backend + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/name: ingress-nginx + template: + metadata: + labels: + app.kubernetes.io/component: default-backend + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/name: ingress-nginx + spec: + containers: + - image: {% endraw -%}{{ build_image_name("nginx-ingress-defaultbackend-amd64", False) }}{%- raw %}:1.5 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + name: ingress-nginx-default-backend + ports: + - containerPort: 8080 + name: http + protocol: TCP + readinessProbe: + failureThreshold: 6 + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 0 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 65534 + nodeSelector: + node-role.kubernetes.io/master: '' + serviceAccountName: ingress-nginx-control-plane-backend + terminationGracePeriodSeconds: 60 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/bootstrap + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: 0.46.0 + helm.sh/chart: ingress-nginx-3.30.0 + heritage: metalk8s + 
metalk8s.scality.com/monitor: '' + name: ingress-nginx-control-plane-controller + namespace: metalk8s-ingress +spec: + endpoints: + - interval: 30s + port: metrics + namespaceSelector: + matchNames: + - metalk8s-ingress + selector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx-control-plane + app.kubernetes.io/name: ingress-nginx + +{% endraw %} diff --git a/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/init.sls b/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/init.sls index f1e80be14c..a50dd8268f 100644 --- a/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/init.sls +++ b/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/init.sls @@ -1,4 +1,66 @@ include: - metalk8s.addons.nginx-ingress.deployed.namespace - .tls-secret +{#- We use DaemonSet if MetalLB disabled, otherwise we use Deployment #} +{%- if not pillar.networks.control_plane.metalLB.enabled %} - .chart-daemonset +{%- else %} + - .chart-deployment + - metalk8s.addons.metallb.deployed +{%- endif %} + +{%- if not pillar.networks.control_plane.metalLB.enabled %} + +Ensure Nginx Ingress Control Plane Deployment does not exist: + metalk8s_kubernetes.object_absent: + - apiVersion: apps/v1 + - kind: Deployment + - name: ingress-nginx-control-plane-controller + - namespace: metalk8s-ingress + - require: + - sls: metalk8s.addons.nginx-ingress.deployed.namespace + - sls: metalk8s.addons.nginx-ingress-control-plane.deployed.chart-daemonset + +Ensure Nginx Ingress Control Plane defaultbackend Deployment does not exist: + metalk8s_kubernetes.object_absent: + - apiVersion: apps/v1 + - kind: Deployment + - name: ingress-nginx-control-plane-defaultbackend + - namespace: metalk8s-ingress + - require: + - sls: metalk8s.addons.nginx-ingress.deployed.namespace + - sls: metalk8s.addons.nginx-ingress-control-plane.deployed.chart-daemonset + +Ensure Nginx Ingress Control Plane defaultbackend Service does not exist: + metalk8s_kubernetes.object_absent: + - apiVersion: v1 + - kind: Service + - name: ingress-nginx-control-plane-defaultbackend + - namespace: metalk8s-ingress + - require: + - sls: metalk8s.addons.nginx-ingress.deployed.namespace + - sls: metalk8s.addons.nginx-ingress-control-plane.deployed.chart-daemonset + +Ensure Nginx Ingress Control Plane defaultbackend ServiceAccount does not exist: + metalk8s_kubernetes.object_absent: + - apiVersion: v1 + - kind: ServiceAccount + - name: ingress-nginx-control-plane-defaultbackend + - namespace: metalk8s-ingress + - require: + - sls: metalk8s.addons.nginx-ingress.deployed.namespace + - sls: metalk8s.addons.nginx-ingress-control-plane.deployed.chart-daemonset + +{%- else %} + +Ensure Nginx Ingress Control Plane DaemonSet does not exist: + metalk8s_kubernetes.object_absent: + - apiVersion: apps/v1 + - kind: DaemonSet + - name: ingress-nginx-control-plane-controller + - namespace: metalk8s-ingress + - require: + - sls: metalk8s.addons.nginx-ingress.deployed.namespace + - sls: metalk8s.addons.nginx-ingress-control-plane.deployed.chart-deployment + +{%- endif %} diff --git a/salt/metalk8s/defaults.yaml b/salt/metalk8s/defaults.yaml index 95d37476fc..468c28d1ec 100644 --- a/salt/metalk8s/defaults.yaml +++ b/salt/metalk8s/defaults.yaml @@ -84,6 +84,14 @@ networks: 127.0.0.1:7443: expected: nginx description: Apiserver proxy + control_plane_ip:7472: + expected: speaker + description: >- + Control plane MetalLB speaker metrics (only if MetalLB enabled) + control_plane_ip:7946: + expected: speaker + description: >- + 
Control plane MetalLB speaker (only if MetalLB enabled)
     ingress_control_plane_ip:8443:
       expected: kube-proxy
       description: Control plane nginx ingress
diff --git a/salt/tests/unit/formulas/data/base_pillar.yaml b/salt/tests/unit/formulas/data/base_pillar.yaml
index 5ae03d616d..f857aacd49 100644
--- a/salt/tests/unit/formulas/data/base_pillar.yaml
+++ b/salt/tests/unit/formulas/data/base_pillar.yaml
@@ -54,6 +54,8 @@ networks:
   control_plane:
     cidr:
       - 51.68.68.0/24
+    metalLB:
+      enabled: False
   workload_plane:
     cidr:
       - 51.68.68.0/24
diff --git a/tests/conftest.py b/tests/conftest.py
index 61e48f0974..7c1bf519a4 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -88,7 +88,7 @@ def control_plane_ingress_ip(k8s_client):
         name="ingress-nginx-control-plane-controller",
         namespace="metalk8s-ingress",
     )
-    return ingress_svc.spec.external_i_ps[0]
+    return ingress_svc.spec.load_balancer_ip or ingress_svc.spec.external_i_ps[0]
 
 
 @pytest.fixture
diff --git a/tests/post/features/ingress.feature b/tests/post/features/ingress.feature
index 2c834903e0..d62c60cfdd 100644
--- a/tests/post/features/ingress.feature
+++ b/tests/post/features/ingress.feature
@@ -19,12 +19,33 @@ Feature: Ingress
         When we perform an HTTP request on port 80 on a control-plane IP
         Then the server should not respond
 
+    Scenario: Failover of Control Plane Ingress VIP using MetalLB
+        Given the Kubernetes API is available
+        And we are on a multi node cluster
+        And MetalLB is enabled
+        When we stop the node hosting the Control Plane Ingress VIP
+        Then the node hosting the Control Plane Ingress VIP changed
+        And we are able to login to Dex as 'admin@metalk8s.invalid' using password 'password'
+
     Scenario: Change Control Plane Ingress IP to node-1 IP
         Given the Kubernetes API is available
         And we are on a multi node cluster
         And pods with label 'app.kubernetes.io/name=ingress-nginx' are 'Ready'
-        When we update control plane ingress IP to node 'node-1' IP
+        When we disable MetalLB and set control plane ingress IP to node 'node-1' IP
         And we wait for the rollout of 'daemonset/ingress-nginx-control-plane-controller' in namespace 'metalk8s-ingress' to complete
         And we wait for the rollout of 'deploy/dex' in namespace 'metalk8s-auth' to complete
         Then the control plane ingress IP is equal to node 'node-1' IP
         And we are able to login to Dex as 'admin@metalk8s.invalid' using password 'password'
+
+    Scenario: Change Control Plane Ingress IP to a new VIP
+        Given the Kubernetes API is available
+        And a VIP for Control Plane Ingress is available
+        And MetalLB is enabled
+        And pods with label 'app.kubernetes.io/name=ingress-nginx' are 'Ready'
+        When we set control plane ingress IP to '{new_cp_ingress_vip}'
+        And we wait for the rollout of 'deploy/metallb-controller' in namespace 'metalk8s-loadbalancing' to complete
+        And we wait for the rollout of 'daemonset/metallb-speaker' in namespace 'metalk8s-loadbalancing' to complete
+        And we wait for the rollout of 'deploy/ingress-nginx-control-plane-controller' in namespace 'metalk8s-ingress' to complete
+        And we wait for the rollout of 'deploy/dex' in namespace 'metalk8s-auth' to complete
+        Then the control plane ingress IP is equal to '{new_cp_ingress_vip}'
+        And we are able to login to Dex as 'admin@metalk8s.invalid' using password 'password'
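As background for the ``control_plane_ingress_ip`` fixture change above,
which now prefers ``spec.load_balancer_ip`` over ``spec.external_i_ps``,
the Control Plane Ingress *Service* spec roughly looks like the sketch
below once MetalLB manages it (the IP value is illustrative, taken from
the CI model in ``eve/main.yml``):

    apiVersion: v1
    kind: Service
    metadata:
      name: ingress-nginx-control-plane-controller
      namespace: metalk8s-ingress
    spec:
      type: LoadBalancer
      loadBalancerIP: 192.168.1.254   # illustrative, from the CI settings
      externalTrafficPolicy: Local
      ports:
      - name: https
        port: 8443
        targetPort: https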
diff --git a/tests/post/features/sanity.feature b/tests/post/features/sanity.feature
index b80ed3a2f5..277400d6d5 100644
--- a/tests/post/features/sanity.feature
+++ b/tests/post/features/sanity.feature
@@ -51,10 +51,21 @@ Feature: Cluster Sanity Checks
         | kube-system         | calico-node                                  |
         | kube-system         | kube-proxy                                   |
         | metalk8s-ingress    | ingress-nginx-controller                     |
-        | metalk8s-ingress    | ingress-nginx-control-plane-controller       |
         | metalk8s-monitoring | prometheus-operator-prometheus-node-exporter |
         | metalk8s-logging    | fluent-bit                                   |
 
+    # Special cases for the Control Plane Ingress Controller and MetalLB,
+    # since they are not deployed in every environment
+    Scenario: Control Plane Ingress Controller when MetalLB is disabled
+        Given MetalLB is disabled
+        Then the DaemonSet 'ingress-nginx-control-plane-controller' in the 'metalk8s-ingress' namespace has all desired Pods ready
+
+    Scenario: Control Plane Ingress Controller when MetalLB is enabled
+        Given MetalLB is enabled
+        Then the DaemonSet 'metallb-speaker' in the 'metalk8s-loadbalancing' namespace has all desired Pods ready
+        And the Deployment 'metallb-controller' in the 'metalk8s-loadbalancing' namespace has all desired replicas available
+        And the Deployment 'ingress-nginx-control-plane-controller' in the 'metalk8s-ingress' namespace has all desired replicas available
+
     @volumes_provisioned
     Scenario Outline: StatefulSet has available replicas
         Then the StatefulSet <name> in the <namespace> namespace has all desired replicas available
diff --git a/tests/post/steps/test_ingress.py b/tests/post/steps/test_ingress.py
index b930cd04ee..5b7a0cab87 100644
--- a/tests/post/steps/test_ingress.py
+++ b/tests/post/steps/test_ingress.py
@@ -1,5 +1,6 @@
 import json
 import os
+import re
 
 import requests
 import requests.exceptions
@@ -25,19 +26,36 @@ def test_access_http_services_on_control_plane_ip(host):
     pass
 
 
+@scenario(
+    "../features/ingress.feature", "Failover of Control Plane Ingress VIP using MetalLB"
+)
+def test_failover_cp_ingress_vip(host, teardown):
+    pass
+
+
 @scenario("../features/ingress.feature", "Change Control Plane Ingress IP to node-1 IP")
 def test_change_cp_ingress_ip(host, teardown):
     pass
 
 
+@scenario("../features/ingress.feature", "Change Control Plane Ingress IP to a new VIP")
+def test_change_cp_ingress_vip(host, teardown):
+    pass
+
+
 @pytest.fixture(scope="function")
 def context():
     return {}
 
 
 @pytest.fixture
-def teardown(context, host, ssh_config, version):
+def teardown(context, host, ssh_config, version, k8s_client):
     yield
 
+    # Uncordon the node cordoned during the VIP failover scenario
+    if "node_to_uncordon" in context:
+        k8s_client.patch_node(
+            context["node_to_uncordon"], {"spec": {"unschedulable": False}}
+        )
+
     if "bootstrap_to_restore" in context:
         with host.sudo():
             host.check_output(
@@ -60,6 +78,24 @@ def node_control_plane_ip_is_not_equal_to_its_workload_plane_ip(host):
         pytest.skip("Node control-plane IP is equal to node workload-plane IP")
 
 
+@given("a VIP for Control Plane Ingress is available")
+def we_have_a_vip(context):
+    cp_ingress_vip = os.environ.get("CONTROL_PLANE_INGRESS_VIP")
+
+    if not cp_ingress_vip:
+        pytest.skip("No Control Plane Ingress VIP to switch to")
+
+    context["new_cp_ingress_vip"] = cp_ingress_vip
+
+
+@given("MetalLB is enabled")
+def metallb_enabled(host):
+    metallb_enabled = utils.get_pillar(host, "networks:control_plane:metalLB:enabled")
+
+    if not metallb_enabled:
+        pytest.skip("MetalLB is not enabled")
+
+
 @when(parsers.parse("we perform an {protocol} request on port {port} on a {plane} IP"))
 def perform_request(host, context, protocol, port, plane):
     protocols = {
@@ -89,11 +125,49 @@ def perform_request(host, context, protocol, port, plane):
         context["exception"] = exc
 
 
-@when(parsers.parse("we update control plane ingress IP to node '{node_name}' IP"))
-def update_cp_ingress_ip(host, context, ssh_config, version, node_name):
+@when("we stop the node hosting the Control Plane Ingress VIP")
+def stop_cp_ingress_vip_node(context, k8s_client):
+    node_name = get_node_hosting_cp_ingress_vip(k8s_client)
+
+    context["cp_ingress_vip_node"] = node_name
+    context["node_to_uncordon"] = node_name
+
+    # Cordon the node so the Ingress Controller cannot come back to it
+    k8s_client.patch_node(node_name, {"spec": {"unschedulable": True}})
+
+    # Delete Control Plane Ingress Controller pods from the node
+    cp_ingress_pods = k8s_client.list_namespaced_pod(
+        "metalk8s-ingress",
+        label_selector="app.kubernetes.io/instance=ingress-nginx-control-plane",
+        field_selector="spec.nodeName={}".format(node_name),
+    )
+    for pod in cp_ingress_pods.items:
+        k8s_client.delete_namespaced_pod(pod.metadata.name, pod.metadata.namespace)
+
+
+@when(
+    parsers.parse(
+        "we disable MetalLB and set control plane ingress IP to node '{node_name}' IP"
+    )
+)
+def disable_metallb_patch_cp_ingress_ip(host, context, ssh_config, version, node_name):
     node = testinfra.get_host(node_name, ssh_config=ssh_config)
     ip = utils.get_grain(node, "metalk8s:control_plane_ip")
 
+    bootstrap_patch = {
+        "networks": {
+            "controlPlane": {"metalLB": {"enabled": False}, "ingress": {"ip": ip}}
+        }
+    }
+
+    patch_bootstrap_config(context, host, bootstrap_patch)
+    re_configure_cp_ingress(host, version, ssh_config)
+
+
+@when(parsers.parse("we set control plane ingress IP to '{ip}'"))
+def update_control_plane_ingress_ip(host, context, ssh_config, version, ip):
+    ip = ip.format(**context)
+
     bootstrap_patch = {"networks": {"controlPlane": {"ingress": {"ip": ip}}}}
 
     patch_bootstrap_config(context, host, bootstrap_patch)
@@ -117,6 +191,15 @@ def server_does_not_respond(host, context):
     assert isinstance(context["exception"], requests.exceptions.ConnectionError)
 
 
+@then("the node hosting the Control Plane Ingress VIP changed")
+def check_node_hosting_vip_changed(context, k8s_client):
+    def _check_node_hosting():
+        new_node = get_node_hosting_cp_ingress_vip(k8s_client)
+        assert new_node != context["cp_ingress_vip_node"]
+
+    utils.retry(_check_node_hosting, times=10, wait=3)
+
+
 @then(parsers.parse("the control plane ingress IP is equal to node '{node_name}' IP"))
 def check_cp_ingress_node_ip(control_plane_ingress_ip, node_name, ssh_config):
     node = testinfra.get_host(node_name, ssh_config=ssh_config)
@@ -125,6 +208,38 @@ def check_cp_ingress_node_ip(control_plane_ingress_ip, node_name, ssh_config):
     assert control_plane_ingress_ip == ip
 
 
+@then(parsers.parse("the control plane ingress IP is equal to '{ip}'"))
+def check_cp_ingress_ip(context, control_plane_ingress_ip, ip):
+    ip = ip.format(**context)
+    assert control_plane_ingress_ip == ip
+
+
+def get_node_hosting_cp_ingress_vip(k8s_client):
+    # To find the node hosting the VIP, look at the events MetalLB
+    # publishes on the Ingress Controller Service
+    field_selectors = [
+        "reason=nodeAssigned",
+        "involvedObject.kind=Service",
+        "involvedObject.name=ingress-nginx-control-plane-controller",
+    ]
+    events = k8s_client.list_namespaced_event(
+        "metalk8s-ingress",
+        field_selector=",".join(field_selectors),
+    )
+
+    assert events.items, "Unable to get event for Control Plane Ingress Service"
+
+    match = re.search(
+        r'announcing from node "(?P<node>.+)"',
+        sorted(events.items, key=lambda event: event.last_timestamp, reverse=True)[
+            0
+        ].message,
+    )
+
+    assert match, "Unable to get the node hosting the Control Plane Ingress VIP"
+
+    return match.group("node")
+
+
 def patch_bootstrap_config(context, host, patch):
     with host.sudo():
         cmd_ret = host.check_output("salt-call --out json --local temp.dir")
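For context, the ``nodeAssigned`` events parsed by
``get_node_hosting_cp_ingress_vip`` above look roughly like the sketch
below. The ``reason``, ``involvedObject`` and message format are the ones
the field selectors and the regex expect; the node name, timestamp and
emitting ``source`` are illustrative assumptions:

    apiVersion: v1
    kind: Event
    metadata:
      namespace: metalk8s-ingress
    reason: nodeAssigned
    message: 'announcing from node "node-1"'   # node name illustrative
    involvedObject:
      kind: Service
      name: ingress-nginx-control-plane-controller
      namespace: metalk8s-ingress
    lastTimestamp: "2021-06-01T12:00:00Z"      # the most recent event wins
    source:
      component: metallb-speaker               # assumed emitter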
diff --git a/tests/post/steps/test_network.py b/tests/post/steps/test_network.py
index e6c5c49575..84c34dd3ad 100644
--- a/tests/post/steps/test_network.py
+++ b/tests/post/steps/test_network.py
@@ -38,6 +38,9 @@ def check_all_listening_process(host, version, control_plane_ingress_ip):
         111,  # rpcbind
         "127.0.0.1:25",  # smtp
     ]
+    # We ignore this range, as these ports are dynamically assigned
+    # (e.g. for LoadBalancer Service node ports)
+    service_node_port_range = range(30000, 32768)  # 30000-32767 inclusive
 
     # Get all listening process
     with host.sudo():
@@ -103,6 +106,10 @@ def check_all_listening_process(host, version, control_plane_ingress_ip):
             if any(key in ignored_listening_processes for key in keys):
                 continue
 
+            # Ignore the service node port range if the process is "kube-proxy"
+            if int(port) in service_node_port_range and process["name"] == "kube-proxy":
+                continue
+
             # NOTE: Special case for containerd which uses a "random" port
             if ip == "127.0.0.1" and process["name"] == "containerd":
                 continue
diff --git a/tests/post/steps/test_sanity.py b/tests/post/steps/test_sanity.py
index f1530229bb..b003e28997 100644
--- a/tests/post/steps/test_sanity.py
+++ b/tests/post/steps/test_sanity.py
@@ -1,7 +1,7 @@
 from kubernetes.client import AppsV1Api
 from kubernetes.client.rest import ApiException
 import pytest
-from pytest_bdd import scenario, then, parsers
+from pytest_bdd import scenario, given, then, parsers
 
 from tests import kube_utils
 from tests import utils
@@ -69,6 +69,33 @@ def test_statefulset_running(host):
     pass
 
 
+@scenario(
+    "../features/sanity.feature",
+    "Control Plane Ingress Controller when MetalLB is disabled",
+)
+def test_cp_ingress_controller_no_metallb(host):
+    pass
+
+
+@scenario(
+    "../features/sanity.feature",
+    "Control Plane Ingress Controller when MetalLB is enabled",
+)
+def test_cp_ingress_controller_metallb(host):
+    pass
+
+
+# }}}
+# Given {{{
+
+
+@given(parsers.parse("MetalLB is {state}"))
+def is_metallb_enabled(host, state):
+    expected = state == "enabled"
+    if expected != utils.get_pillar(host, "networks:control_plane:metalLB:enabled"):
+        pytest.skip("This test only runs when MetalLB is {}".format(state))
+
+
 # }}}
 # Then {{{
 
@@ -152,6 +179,12 @@ def check_static_pod(k8s_client, name, namespace, role):
     "the Deployment in the namespace has all desired "
     "replicas available"
 )
+@then(
+    parsers.parse(
+        "the Deployment '{name}' in the '{namespace}' namespace has all desired "
+        "replicas available"
+    )
+)
 def check_deployment(apps_client, name, namespace):
     def _wait_for_deployment():
         try:
@@ -178,6 +211,12 @@ def _wait_for_deployment():
 
 
 @then("the DaemonSet in the namespace has all desired " "Pods ready")
+@then(
+    parsers.parse(
+        "the DaemonSet '{name}' in the '{namespace}' namespace has all desired "
+        "Pods ready"
+    )
+)
 def check_daemonset(apps_client, name, namespace):
     def _wait_for_daemon_set():
         try:
diff --git a/tox.ini b/tox.ini
index e4ed7a79ad..a8e19a2e99 100644
--- a/tox.ini
+++ b/tox.ini
@@ -122,6 +122,7 @@ passenv =
     TEST_HOSTS_LIST
     ISO_MOUNTPOINT
     BOOTSTRAP_BACKUP_ARCHIVE
+    CONTROL_PLANE_INGRESS_VIP
 setenv =
     VAGRANT_CWD={toxinidir}
 commands_pre =
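For reference, combining the ``Create MetalLB ConfigMap`` state from
``salt/metalk8s/addons/metallb/deployed/config.sls`` with the CI ingress
IP above (``192.168.1.254``) would render a ConfigMap along these lines
(a sketch; the address is whatever
``metalk8s_network.get_control_plane_ingress_ip()`` returns in your
environment):

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: metallb-config
      namespace: metalk8s-loadbalancing
    data:
      config: |
        address-pools:
        - name: control-plane-ingress-ip
          protocol: layer2
          addresses:
          - 192.168.1.254/32   # the Control Plane Ingress VIP, as a /32 pool
          auto-assign: false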