From 3a8c3f4bd4e62865cb2fe61fc7b16b58d9db4ae5 Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Mon, 7 Aug 2023 01:29:04 +0100 Subject: [PATCH 1/5] deploy our custom coredns addon --- pkg/kapi/kapi.go | 76 ++-- .../bootstrapper/bsutil/kverify/pod_ready.go | 61 ++++ .../bootstrapper/kubeadm/dns/coredns.go | 344 ++++++++++++++++++ pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 31 +- pkg/minikube/node/start.go | 57 ++- 5 files changed, 508 insertions(+), 61 deletions(-) create mode 100644 pkg/minikube/bootstrapper/kubeadm/dns/coredns.go diff --git a/pkg/kapi/kapi.go b/pkg/kapi/kapi.go index 2da81dcff6fa..18f0b379304e 100644 --- a/pkg/kapi/kapi.go +++ b/pkg/kapi/kapi.go @@ -211,41 +211,41 @@ func KubectlBinaryPath(version string) string { return path.Join(vmpath.GuestPersistentDir, "binaries", version, "kubectl") } -// ScaleDeployment tries to set the number of deployment replicas in namespace and context. -// It will retry (usually needed due to "the object has been modified; please apply your changes to the latest version and try again" error) up to ReasonableMutateTime to ensure target scale is achieved. -func ScaleDeployment(kcontext, namespace, deploymentName string, replicas int) error { - client, err := Client(kcontext) - if err != nil { - return fmt.Errorf("client: %v", err) - } - - err = wait.PollUntilContextTimeout(context.Background(), kconst.APICallRetryInterval, ReasonableMutateTime, true, func(ctx context.Context) (bool, error) { - scale, err := client.AppsV1().Deployments(namespace).GetScale(ctx, deploymentName, meta.GetOptions{}) - if err != nil { - if !IsRetryableAPIError(err) { - return false, fmt.Errorf("non-retryable failure while getting %q deployment scale: %v", deploymentName, err) - } - klog.Warningf("failed getting %q deployment scale, will retry: %v", deploymentName, err) - return false, nil - } - if scale.Spec.Replicas != int32(replicas) { - scale.Spec.Replicas = int32(replicas) - if _, err = client.AppsV1().Deployments(namespace).UpdateScale(ctx, deploymentName, scale, meta.UpdateOptions{}); err != nil { - if !IsRetryableAPIError(err) { - return false, fmt.Errorf("non-retryable failure while rescaling %s deployment: %v", deploymentName, err) - } - klog.Warningf("failed rescaling %s deployment, will retry: %v", deploymentName, err) - } - // repeat (if change was successful - once again to check & confirm requested scale) - return false, nil - } - return true, nil - }) - if err != nil { - klog.Warningf("failed rescaling %q deployment in %q namespace and %q context to %d replicas: %v", deploymentName, namespace, kcontext, replicas, err) - return err - } - klog.Infof("%q deployment in %q namespace and %q context rescaled to %d replicas", deploymentName, namespace, kcontext, replicas) - - return nil -} +// // ScaleDeployment tries to set the number of deployment replicas in namespace and context. +// // It will retry (usually needed due to "the object has been modified; please apply your changes to the latest version and try again" error) up to ReasonableMutateTime to ensure target scale is achieved. 
+// func ScaleDeployment(kcontext, namespace, deploymentName string, replicas int) error { +// client, err := Client(kcontext) +// if err != nil { +// return fmt.Errorf("client: %v", err) +// } + +// err = wait.PollUntilContextTimeout(context.Background(), kconst.APICallRetryInterval, ReasonableMutateTime, true, func(ctx context.Context) (bool, error) { +// scale, err := client.AppsV1().Deployments(namespace).GetScale(ctx, deploymentName, meta.GetOptions{}) +// if err != nil { +// if !IsRetryableAPIError(err) { +// return false, fmt.Errorf("non-retryable failure while getting %q deployment scale: %v", deploymentName, err) +// } +// klog.Warningf("failed getting %q deployment scale, will retry: %v", deploymentName, err) +// return false, nil +// } +// if scale.Spec.Replicas != int32(replicas) { +// scale.Spec.Replicas = int32(replicas) +// if _, err = client.AppsV1().Deployments(namespace).UpdateScale(ctx, deploymentName, scale, meta.UpdateOptions{}); err != nil { +// if !IsRetryableAPIError(err) { +// return false, fmt.Errorf("non-retryable failure while rescaling %s deployment: %v", deploymentName, err) +// } +// klog.Warningf("failed rescaling %s deployment, will retry: %v", deploymentName, err) +// } +// // repeat (if change was successful - once again to check & confirm requested scale) +// return false, nil +// } +// return true, nil +// }) +// if err != nil { +// klog.Warningf("failed rescaling %q deployment in %q namespace and %q context to %d replicas: %v", deploymentName, namespace, kcontext, replicas, err) +// return err +// } +// klog.Infof("%q deployment in %q namespace and %q context rescaled to %d replicas", deploymentName, namespace, kcontext, replicas) + +// return nil +// } diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/pod_ready.go b/pkg/minikube/bootstrapper/bsutil/kverify/pod_ready.go index c2cc9fd63a0f..d094d2c9832f 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/pod_ready.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/pod_ready.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" + "k8s.io/minikube/pkg/kapi" kconst "k8s.io/minikube/third_party/kubeadm/app/constants" ) @@ -155,3 +156,63 @@ func IsPodReady(pod *core.Pod) (ready bool, reason string) { } return false, fmt.Sprintf("pod %q in %q namespace does not have %q status: %+v", pod.Name, pod.Namespace, core.PodReady, pod.Status) } + +// UnloathPods deletes once pod(s) with label in namespace if they don't become Ready within timeout after they're Running. 
+func UnloathPods(ctx context.Context, kcontext, label, namespace string, timeout time.Duration) error { + client, err := kapi.Client(kcontext) + if err != nil { + return fmt.Errorf("kapi client: %v", err) + } + + var pods *core.PodList + lap := time.Now() + // need at least one running pod + if err := wait.PollUntilContextCancel(ctx, kconst.APICallRetryInterval, true, func(_ context.Context) (done bool, err error) { + pods, err = client.CoreV1().Pods(namespace).List(ctx, meta.ListOptions{LabelSelector: label}) + if err != nil || len(pods.Items) == 0 { + // reduce log spam + if time.Since(lap) > (2 * time.Second) { + klog.Infof("waiting for running pod(s) with %q label in %q namespace (error: %v)...", label, namespace, err) + lap = time.Now() + } + return false, nil + } + + running := false + for _, pod := range pods.Items { + if running = (pod.Status.Phase == core.PodRunning); running { + break + } + } + if !running { + return false, nil + } + return true, nil + }); err != nil { + return fmt.Errorf("waiting for running pod(s) with %q label in %q namespace failed: %v", label, namespace, err) + } + + // need at least one pod to become ready - within timeout + if err := wait.PollUntilContextTimeout(ctx, kconst.APICallRetryInterval, timeout, true, func(_ context.Context) (done bool, err error) { + ready := false + for _, pod := range pods.Items { + if ready, _ = IsPodReady(&pod); ready { + break + } + } + if !ready { + return false, nil + } + return true, nil + }); err != nil { + klog.Errorf("waiting for ready pod(s) with %q label in %q namespace failed (will try deleting them once): %v", label, namespace, err) + now := int64(0) + if err := client.CoreV1().Pods(namespace).DeleteCollection(ctx, meta.DeleteOptions{GracePeriodSeconds: &now}, meta.ListOptions{LabelSelector: label}); err != nil { + return fmt.Errorf("deleting pod(s) with %q label in %q namespace failed: %v", label, namespace, err) + } + klog.Infof("deleting pod(s) with %q label in %q namespace initiated", label, namespace) + return nil + } + klog.Infof("pod(s) with %q label in %q namespace reached %q condition within %v", label, namespace, core.PodReady, timeout) + return nil +} diff --git a/pkg/minikube/bootstrapper/kubeadm/dns/coredns.go b/pkg/minikube/bootstrapper/kubeadm/dns/coredns.go new file mode 100644 index 000000000000..e6d960655bbc --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/dns/coredns.go @@ -0,0 +1,344 @@ +/* +Copyright 2023 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// minikube-specific CoreDNS manifests based on the default kubeadm's embedded ones +// ref: https://github.com/kubernetes/kubernetes/blob/master/cmd/kubeadm/app/phases/addons/dns/manifests.go + +package dns + +import ( + "bytes" + "context" + "fmt" + "html/template" + "os/exec" + "path" + "strings" + "time" + + "k8s.io/klog/v2" + "k8s.io/minikube/pkg/kapi" + "k8s.io/minikube/pkg/minikube/assets" + "k8s.io/minikube/pkg/minikube/bootstrapper/images" + "k8s.io/minikube/pkg/minikube/command" + "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/vmpath" + kconst "k8s.io/minikube/third_party/kubeadm/app/constants" +) + +const ( + // CoreDNSService is the CoreDNS Service manifest + CoreDNSService = ` +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + name: kube-dns + namespace: kube-system + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + # Without this resourceVersion value, an update of the Service between versions will yield: + # Service "kube-dns" is invalid: metadata.resourceVersion: Invalid value: "": must be specified for an update + resourceVersion: "0" +spec: + clusterIP: {{ .DNSIP }} + ports: + - name: dns + port: 53 + protocol: UDP + targetPort: 53 + - name: dns-tcp + port: 53 + protocol: TCP + targetPort: 53 + - name: metrics + port: 9153 + protocol: TCP + targetPort: 9153 + selector: + k8s-app: kube-dns +` + + // CoreDNSDeployment is the CoreDNS Deployment manifest + CoreDNSDeployment = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .DeploymentName }} + namespace: kube-system + labels: + k8s-app: kube-dns +spec: + replicas: {{ .Replicas }} + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: kube-dns + template: + metadata: + labels: + k8s-app: kube-dns + spec: + priorityClassName: system-cluster-critical + serviceAccountName: coredns + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: k8s-app + operator: In + values: ["kube-dns"] + topologyKey: kubernetes.io/hostname + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: {{ .ControlPlaneTaintKey }} + effect: NoSchedule + nodeSelector: + kubernetes.io/os: linux + containers: + - name: coredns + image: {{ .Image }} + imagePullPolicy: IfNotPresent + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + readOnly: true + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + dnsPolicy: Default + volumes: + - name: config-volume + configMap: + name: coredns + items: + - key: Corefile + path: Corefile +` + + // CoreDNSConfigMap is the CoreDNS ConfigMap manifest + CoreDNSConfigMap = ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system +data: + 
Corefile: | + .:53 { + log + errors + health { + lameduck 5s + } + ready + kubernetes {{ .DNSDomain }} in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + hosts { + {{ .MinikubeHostIP }} {{ .MinikubeHostFQDN }} + fallthrough + } + forward . /etc/resolv.conf { + max_concurrent 1000 + } + cache 30 + loop + reload + loadbalance + } +` + // CoreDNSClusterRole is the CoreDNS ClusterRole manifest + CoreDNSClusterRole = ` +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch +` + // CoreDNSClusterRoleBinding is the CoreDNS Clusterrolebinding manifest + CoreDNSClusterRoleBinding = ` +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system +` + // CoreDNSServiceAccount is the CoreDNS ServiceAccount manifest + CoreDNSServiceAccount = ` +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coredns + namespace: kube-system +` +) + +func DeployCoreDNS(cc config.ClusterConfig, r command.Runner, hostIP, hostFQDN string) error { + manifests, err := coreDNSManifests(cc, hostIP, hostFQDN) + if err != nil { + return fmt.Errorf("coredns manifests: %v", err) + } + klog.Infof("coredns manifests:\n%s\n", manifests) + + // copy over manifests file + manifestPath := path.Join(vmpath.GuestAddonsDir, "coredns.yaml") + m := assets.NewMemoryAssetTarget(manifests, manifestPath, "0640") + if err := r.Copy(m); err != nil { + return fmt.Errorf("coredns asset copy: %v", err) + } + + // apply manifests file + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + kubectl := kapi.KubectlBinaryPath(cc.KubernetesConfig.KubernetesVersion) + klog.Infof("applying CoreDNS manifests using %s ...", kubectl) + cmd := exec.CommandContext(ctx, "sudo", kubectl, "apply", fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig")), "-f", manifestPath) + if rr, err := r.RunCmd(cmd); err != nil { + return fmt.Errorf("coredns apply cmd: %q output: %q error: %v", rr.Command(), rr.Output(), err) + } + + return nil +} + +func coreDNSManifests(cc config.ClusterConfig, hostIP, hostFQDN string) ([]byte, error) { + toml := CoreDNSServiceAccount + "---" + + CoreDNSClusterRole + "---" + + CoreDNSClusterRoleBinding + "---" + + CoreDNSConfigMap + "---" + + CoreDNSDeployment + "---" + + CoreDNSService + + dnsip, err := kconst.GetDNSIP(cc.KubernetesConfig.ServiceCIDR, true) + if err != nil { + return nil, err + } + + image := "" + imgs, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, cc.KubernetesConfig.KubernetesVersion) + if err != nil { + return nil, fmt.Errorf("kubeadm images: %v", err) + } + for _, img := range imgs { + if strings.Contains(img, kconst.CoreDNSImageName) { + image = img + break + } + } + if image == "" { + return nil, fmt.Errorf("coredns image not found") + } + + var replicas int32 = 1 + + params := struct { + DNSDomain, MinikubeHostIP, MinikubeHostFQDN string + DeploymentName, ControlPlaneTaintKey, Image string + Replicas *int32 + DNSIP string + }{ + DNSDomain: cc.KubernetesConfig.DNSDomain, + MinikubeHostIP: hostIP, + MinikubeHostFQDN: 
hostFQDN, + DeploymentName: kconst.CoreDNSDeploymentName, + ControlPlaneTaintKey: kconst.LabelNodeRoleControlPlane, + Image: image, + Replicas: &replicas, + DNSIP: dnsip.String(), + } + + t := template.Must(template.New("coredns").Parse(toml)) + var manifests bytes.Buffer + if err = t.Execute(&manifests, params); err != nil { + return nil, fmt.Errorf("executing CoreDNS template: %v", err) + } + + return manifests.Bytes(), nil +} diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index fdba86768dd8..6d1b61d2d612 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -242,8 +242,12 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error { ctx, cancel := context.WithTimeout(context.Background(), initTimeoutMinutes*time.Minute) defer cancel() kr, kw := io.Pipe() - c := exec.CommandContext(ctx, "/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", - bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), conf, extraFlags, strings.Join(ignore, ","))) + flags := fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", + bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), conf, extraFlags, strings.Join(ignore, ",")) + if !cfg.DisableOptimizations { + flags += " --skip-phases=addon/coredns" + } + c := exec.CommandContext(ctx, "/bin/bash", "-c", flags) c.Stdout = kw c.Stderr = kw var wg sync.WaitGroup @@ -632,6 +636,8 @@ func (k *Bootstrapper) needsReconfigure(conf string, hostname string, port int, } // restartCluster restarts the Kubernetes cluster configured by kubeadm +// +//gocyclo:ignore func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error { klog.Infof("restartCluster start") @@ -750,11 +756,28 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error { } // This can fail during upgrades if the old pods have not shut down yet + // ref: https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-addon addonPhase := func() error { - addons := "all" - if cfg.KubernetesConfig.ExtraOptions.Exists("kubeadm.skip-phases=addon/kube-proxy") { + addons := "" + + kubeproxy := !cfg.KubernetesConfig.ExtraOptions.Exists("kubeadm.skip-phases=addon/kube-proxy") + coredns := cfg.DisableOptimizations // disabling optimisations will use default kubeadm's coredns addon instead of our custom one + + if kubeproxy { + addons = "kube-proxy" + } + if coredns { addons = "coredns" } + if kubeproxy && coredns { + addons = "all" + } + + if addons == "" { + // skip phase addon + return nil + } + _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon %s --config %s", baseCmd, addons, conf))) return err } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index fd4c00921b4e..4acab7da95ff 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -34,7 +34,6 @@ import ( "github.com/pkg/errors" "github.com/spf13/viper" "golang.org/x/sync/errgroup" - meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/addons" @@ -44,6 +43,7 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil" "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm" + "k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm/dns" "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/cni" 
"k8s.io/minikube/pkg/minikube/command" @@ -68,7 +68,6 @@ import ( "k8s.io/minikube/pkg/network" "k8s.io/minikube/pkg/util" "k8s.io/minikube/pkg/util/retry" - kconst "k8s.io/minikube/third_party/kubeadm/app/constants" ) const waitTimeout = "wait-timeout" @@ -91,6 +90,8 @@ type Starter struct { } // Start spins up a guest and starts the Kubernetes node. +// +//gocyclo:ignore func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { var wg sync.WaitGroup stopk8s, err := handleNoKubernetes(starter) @@ -136,10 +137,43 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { var kcs *kubeconfig.Settings var bs bootstrapper.Bootstrapper if apiServer { - kcs, bs, err = handleAPIServer(starter, cr, hostIP) + kcs, bs, err = handleAPIServer(starter, cr) if err != nil { return nil, err } + + if !starter.Cfg.DisableOptimizations { + // deploy our custom CoreDNS addon + wg.Add(1) + go func() { + defer wg.Done() + if err := dns.DeployCoreDNS(*starter.Cfg, starter.Runner, hostIP.String(), constants.HostAlias); err != nil { + out.FailureT("Unable to deploy CoreDNS: {{.error}}", out.V{"error": err}) + } + }() + + // wg.Add(1) + // go func() { + // defer wg.Done() + // // // Scale down CoreDNS from default 2 to 1 replica. + // // if err := kapi.ScaleDeployment(starter.Cfg.Name, meta.NamespaceSystem, kconst.CoreDNSDeploymentName, 1); err != nil { + // // klog.Errorf("Unable to scale down deployment %q in namespace %q to 1 replica: %v", kconst.CoreDNSDeploymentName, meta.NamespaceSystem, err) + // // } + + // // benchmark hack: ensure CoreDNS is Ready for tests - might add overhead of few seconds to each startup time! + // timeout := 5 * time.Second + // klog.Infof("ensure CoreDNS is Ready within %v after starting...", timeout) + // if err := kverify.UnloathPods(context.Background(), starter.Cfg.Name, "k8s-app=kube-dns", meta.NamespaceSystem, timeout); err != nil { + // klog.Errorf("unable to ensure CoreDNS Ready condition %v after starting: %v", timeout, err) + // } + // }() + } else { + // Inject {"host.minikube.internal": hostIP} record into CoreDNS. + if err := addCoreDNSEntry(starter.Runner, constants.HostAlias, hostIP.String(), *starter.Cfg); err != nil { + klog.Warningf("Unable to inject {%q: %s} record into CoreDNS: %v", "host.minikube.internal", hostIP.String(), err) + out.Err("Failed to inject host.minikube.internal into CoreDNS, this will limit the pods access to the host IP") + } + } } else { bs, err = cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, starter.Runner) if err != nil { @@ -213,13 +247,6 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { } } - if !starter.Cfg.DisableOptimizations { - // Scale down CoreDNS from default 2 to 1 replica. - if err := kapi.ScaleDeployment(starter.Cfg.Name, meta.NamespaceSystem, kconst.CoreDNSDeploymentName, 1); err != nil { - klog.Errorf("Unable to scale down deployment %q in namespace %q to 1 replica: %v", kconst.CoreDNSDeploymentName, meta.NamespaceSystem, err) - } - } - klog.Infof("Will wait %s for node %+v", viper.GetDuration(waitTimeout), starter.Node) if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil { return nil, errors.Wrapf(err, "wait %s for node", viper.GetDuration(waitTimeout)) @@ -261,7 +288,7 @@ func handleNoKubernetes(starter Starter) (bool, error) { } // handleAPIServer handles starting the API server. 
-func handleAPIServer(starter Starter, cr cruntime.Manager, hostIP net.IP) (*kubeconfig.Settings, bootstrapper.Bootstrapper, error) { +func handleAPIServer(starter Starter, cr cruntime.Manager) (*kubeconfig.Settings, bootstrapper.Bootstrapper, error) { var err error // Must be written before bootstrap, otherwise health checks may flake due to stale IP. @@ -284,14 +311,6 @@ func handleAPIServer(starter Starter, cr cruntime.Manager, hostIP net.IP) (*kube return nil, bs, errors.Wrap(err, "Failed kubeconfig update") } - // Not running this in a Go func can result in DNS answering taking up to 38 seconds, with the Go func it takes 6-10 seconds. - go func() { - // Inject {"host.minikube.internal": hostIP} record into CoreDNS. - if err := addCoreDNSEntry(starter.Runner, "host.minikube.internal", hostIP.String(), *starter.Cfg); err != nil { - klog.Warningf("Unable to inject {%q: %s} record into CoreDNS: %v", "host.minikube.internal", hostIP.String(), err) - out.Err("Failed to inject host.minikube.internal into CoreDNS, this will limit the pods access to the host IP") - } - }() return kcs, bs, nil } From b6fce457997a5e4ffde962751bf6400835724517 Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Mon, 7 Aug 2023 01:52:47 +0100 Subject: [PATCH 2/5] cleanup --- pkg/kapi/kapi.go | 39 ------------------- .../bootstrapper/kubeadm/dns/coredns.go | 2 + pkg/minikube/node/start.go | 7 +--- 3 files changed, 3 insertions(+), 45 deletions(-) diff --git a/pkg/kapi/kapi.go b/pkg/kapi/kapi.go index 18f0b379304e..61317d70d7c7 100644 --- a/pkg/kapi/kapi.go +++ b/pkg/kapi/kapi.go @@ -210,42 +210,3 @@ func IsRetryableAPIError(err error) bool { func KubectlBinaryPath(version string) string { return path.Join(vmpath.GuestPersistentDir, "binaries", version, "kubectl") } - -// // ScaleDeployment tries to set the number of deployment replicas in namespace and context. -// // It will retry (usually needed due to "the object has been modified; please apply your changes to the latest version and try again" error) up to ReasonableMutateTime to ensure target scale is achieved. 
-// func ScaleDeployment(kcontext, namespace, deploymentName string, replicas int) error { -// client, err := Client(kcontext) -// if err != nil { -// return fmt.Errorf("client: %v", err) -// } - -// err = wait.PollUntilContextTimeout(context.Background(), kconst.APICallRetryInterval, ReasonableMutateTime, true, func(ctx context.Context) (bool, error) { -// scale, err := client.AppsV1().Deployments(namespace).GetScale(ctx, deploymentName, meta.GetOptions{}) -// if err != nil { -// if !IsRetryableAPIError(err) { -// return false, fmt.Errorf("non-retryable failure while getting %q deployment scale: %v", deploymentName, err) -// } -// klog.Warningf("failed getting %q deployment scale, will retry: %v", deploymentName, err) -// return false, nil -// } -// if scale.Spec.Replicas != int32(replicas) { -// scale.Spec.Replicas = int32(replicas) -// if _, err = client.AppsV1().Deployments(namespace).UpdateScale(ctx, deploymentName, scale, meta.UpdateOptions{}); err != nil { -// if !IsRetryableAPIError(err) { -// return false, fmt.Errorf("non-retryable failure while rescaling %s deployment: %v", deploymentName, err) -// } -// klog.Warningf("failed rescaling %s deployment, will retry: %v", deploymentName, err) -// } -// // repeat (if change was successful - once again to check & confirm requested scale) -// return false, nil -// } -// return true, nil -// }) -// if err != nil { -// klog.Warningf("failed rescaling %q deployment in %q namespace and %q context to %d replicas: %v", deploymentName, namespace, kcontext, replicas, err) -// return err -// } -// klog.Infof("%q deployment in %q namespace and %q context rescaled to %d replicas", deploymentName, namespace, kcontext, replicas) - -// return nil -// } diff --git a/pkg/minikube/bootstrapper/kubeadm/dns/coredns.go b/pkg/minikube/bootstrapper/kubeadm/dns/coredns.go index e6d960655bbc..a2e982bfeb54 100644 --- a/pkg/minikube/bootstrapper/kubeadm/dns/coredns.go +++ b/pkg/minikube/bootstrapper/kubeadm/dns/coredns.go @@ -261,6 +261,7 @@ metadata: ` ) +// DeployCoreDNS deploys custom CoreDNS addon manifests func DeployCoreDNS(cc config.ClusterConfig, r command.Runner, hostIP, hostFQDN string) error { manifests, err := coreDNSManifests(cc, hostIP, hostFQDN) if err != nil { @@ -288,6 +289,7 @@ func DeployCoreDNS(cc config.ClusterConfig, r command.Runner, hostIP, hostFQDN s return nil } +// coreDNSManifests generates custom CoreDNS addon manifests func coreDNSManifests(cc config.ClusterConfig, hostIP, hostFQDN string) ([]byte, error) { toml := CoreDNSServiceAccount + "---" + CoreDNSClusterRole + "---" + diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 4acab7da95ff..18bd8e5c8d0d 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -152,15 +152,10 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { } }() + // benchmark hack: ensure CoreDNS is Ready for tests - might add overhead of few seconds to each startup time! // wg.Add(1) // go func() { // defer wg.Done() - // // // Scale down CoreDNS from default 2 to 1 replica. - // // if err := kapi.ScaleDeployment(starter.Cfg.Name, meta.NamespaceSystem, kconst.CoreDNSDeploymentName, 1); err != nil { - // // klog.Errorf("Unable to scale down deployment %q in namespace %q to 1 replica: %v", kconst.CoreDNSDeploymentName, meta.NamespaceSystem, err) - // // } - - // // benchmark hack: ensure CoreDNS is Ready for tests - might add overhead of few seconds to each startup time! 
// timeout := 5 * time.Second // klog.Infof("ensure CoreDNS is Ready within %v after starting...", timeout) // if err := kverify.UnloathPods(context.Background(), starter.Cfg.Name, "k8s-app=kube-dns", meta.NamespaceSystem, timeout); err != nil { From 8de81377cf7b0f8f4f5586fc099dc6812fe2ec91 Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Sun, 13 Aug 2023 16:59:07 +0100 Subject: [PATCH 3/5] cleanup --- .../bootstrapper/bsutil/kverify/pod_ready.go | 61 ------------------- pkg/minikube/node/start.go | 11 ---- 2 files changed, 72 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/pod_ready.go b/pkg/minikube/bootstrapper/bsutil/kverify/pod_ready.go index d094d2c9832f..c2cc9fd63a0f 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/pod_ready.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/pod_ready.go @@ -27,7 +27,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" - "k8s.io/minikube/pkg/kapi" kconst "k8s.io/minikube/third_party/kubeadm/app/constants" ) @@ -156,63 +155,3 @@ func IsPodReady(pod *core.Pod) (ready bool, reason string) { } return false, fmt.Sprintf("pod %q in %q namespace does not have %q status: %+v", pod.Name, pod.Namespace, core.PodReady, pod.Status) } - -// UnloathPods deletes once pod(s) with label in namespace if they don't become Ready within timeout after they're Running. -func UnloathPods(ctx context.Context, kcontext, label, namespace string, timeout time.Duration) error { - client, err := kapi.Client(kcontext) - if err != nil { - return fmt.Errorf("kapi client: %v", err) - } - - var pods *core.PodList - lap := time.Now() - // need at least one running pod - if err := wait.PollUntilContextCancel(ctx, kconst.APICallRetryInterval, true, func(_ context.Context) (done bool, err error) { - pods, err = client.CoreV1().Pods(namespace).List(ctx, meta.ListOptions{LabelSelector: label}) - if err != nil || len(pods.Items) == 0 { - // reduce log spam - if time.Since(lap) > (2 * time.Second) { - klog.Infof("waiting for running pod(s) with %q label in %q namespace (error: %v)...", label, namespace, err) - lap = time.Now() - } - return false, nil - } - - running := false - for _, pod := range pods.Items { - if running = (pod.Status.Phase == core.PodRunning); running { - break - } - } - if !running { - return false, nil - } - return true, nil - }); err != nil { - return fmt.Errorf("waiting for running pod(s) with %q label in %q namespace failed: %v", label, namespace, err) - } - - // need at least one pod to become ready - within timeout - if err := wait.PollUntilContextTimeout(ctx, kconst.APICallRetryInterval, timeout, true, func(_ context.Context) (done bool, err error) { - ready := false - for _, pod := range pods.Items { - if ready, _ = IsPodReady(&pod); ready { - break - } - } - if !ready { - return false, nil - } - return true, nil - }); err != nil { - klog.Errorf("waiting for ready pod(s) with %q label in %q namespace failed (will try deleting them once): %v", label, namespace, err) - now := int64(0) - if err := client.CoreV1().Pods(namespace).DeleteCollection(ctx, meta.DeleteOptions{GracePeriodSeconds: &now}, meta.ListOptions{LabelSelector: label}); err != nil { - return fmt.Errorf("deleting pod(s) with %q label in %q namespace failed: %v", label, namespace, err) - } - klog.Infof("deleting pod(s) with %q label in %q namespace initiated", label, namespace) - return nil - } - klog.Infof("pod(s) with %q label in %q namespace reached %q condition within %v", label, namespace, core.PodReady, timeout) - return nil -} 
diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 18bd8e5c8d0d..df5a1fb95f09 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -151,17 +151,6 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { out.FailureT("Unable to deploy CoreDNS: {{.error}}", out.V{"error": err}) } }() - - // benchmark hack: ensure CoreDNS is Ready for tests - might add overhead of few seconds to each startup time! - // wg.Add(1) - // go func() { - // defer wg.Done() - // timeout := 5 * time.Second - // klog.Infof("ensure CoreDNS is Ready within %v after starting...", timeout) - // if err := kverify.UnloathPods(context.Background(), starter.Cfg.Name, "k8s-app=kube-dns", meta.NamespaceSystem, timeout); err != nil { - // klog.Errorf("unable to ensure CoreDNS Ready condition %v after starting: %v", timeout, err) - // } - // }() } else { // Inject {"host.minikube.internal": hostIP} record into CoreDNS. if err := addCoreDNSEntry(starter.Runner, constants.HostAlias, hostIP.String(), *starter.Cfg); err != nil { From 7e8d02be9f9231a54e57e3dcd66e43482675e9c6 Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Thu, 7 Mar 2024 22:18:32 +0000 Subject: [PATCH 4/5] fix lint complaints --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 2 +- pkg/minikube/node/start.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 5949d7a8e58c..69d8bee91cc1 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -587,7 +587,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time } // restartPrimaryControlPlane restarts the kubernetes cluster configured by kubeadm. 
-func (k *Bootstrapper) restartPrimaryControlPlane(cfg config.ClusterConfig) error { +func (k *Bootstrapper) restartPrimaryControlPlane(cfg config.ClusterConfig) error { //nolint:gocyclo klog.Infof("restartPrimaryControlPlane start ...") start := time.Now() diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 5a7d9d50df3d..bc143cbceac6 100755 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -154,10 +154,10 @@ func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo } // scale down CoreDNS from default 2 to 1 replica only for non-ha (non-multi-control plane) cluster and if optimisation is not disabled if !starter.Cfg.DisableOptimizations && !config.IsHA(*starter.Cfg) { - // deploy our custom CoreDNS addon - if err := dns.DeployCoreDNS(*starter.Cfg, starter.Runner, hostIP.String(), constants.HostAlias); err != nil { - out.FailureT("Unable to deploy CoreDNS: {{.error}}", out.V{"error": err}) - } + // deploy our custom CoreDNS addon + if err := dns.DeployCoreDNS(*starter.Cfg, starter.Runner, hostIP.String(), constants.HostAlias); err != nil { + out.FailureT("Unable to deploy CoreDNS: {{.error}}", out.V{"error": err}) + } } }() } From 0da966c23de78e02b4bf6ee2cf5c8ba4b84621cc Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Sun, 10 Mar 2024 23:06:57 +0000 Subject: [PATCH 5/5] deploy coredns as daemonset --- .../bootstrapper/kubeadm/dns/coredns.go | 43 +++++------ pkg/minikube/node/start.go | 71 +------------------ 2 files changed, 25 insertions(+), 89 deletions(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/dns/coredns.go b/pkg/minikube/bootstrapper/kubeadm/dns/coredns.go index a2e982bfeb54..6faa062ef1ff 100644 --- a/pkg/minikube/bootstrapper/kubeadm/dns/coredns.go +++ b/pkg/minikube/bootstrapper/kubeadm/dns/coredns.go @@ -29,6 +29,7 @@ import ( "strings" "time" + "github.com/blang/semver/v4" "k8s.io/klog/v2" "k8s.io/minikube/pkg/kapi" "k8s.io/minikube/pkg/minikube/assets" @@ -36,6 +37,7 @@ import ( "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/vmpath" + "k8s.io/minikube/pkg/util" kconst "k8s.io/minikube/third_party/kubeadm/app/constants" ) @@ -79,18 +81,13 @@ spec: // CoreDNSDeployment is the CoreDNS Deployment manifest CoreDNSDeployment = ` apiVersion: apps/v1 -kind: Deployment +kind: DaemonSet metadata: name: {{ .DeploymentName }} namespace: kube-system labels: k8s-app: kube-dns spec: - replicas: {{ .Replicas }} - strategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 selector: matchLabels: k8s-app: kube-dns @@ -102,16 +99,13 @@ spec: priorityClassName: system-cluster-critical serviceAccountName: coredns affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: k8s-app - operator: In - values: ["kube-dns"] - topologyKey: kubernetes.io/hostname + # nodeAffinity is used instead of podAntiAffinity to have the daemonset runnable only on control plane nodes + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .ControlPlaneTaintKey }} + operator: Exists tolerations: - key: CriticalAddonsOnly operator: Exists @@ -164,7 +158,7 @@ spec: add: - NET_BIND_SERVICE drop: - - all + - ALL readOnlyRootFilesystem: true dnsPolicy: Default volumes: @@ -318,12 +312,9 @@ func coreDNSManifests(cc config.ClusterConfig, hostIP, hostFQDN string) ([]byte, return nil, fmt.Errorf("coredns image not 
found") } - var replicas int32 = 1 - params := struct { DNSDomain, MinikubeHostIP, MinikubeHostFQDN string DeploymentName, ControlPlaneTaintKey, Image string - Replicas *int32 DNSIP string }{ DNSDomain: cc.KubernetesConfig.DNSDomain, @@ -332,10 +323,20 @@ func coreDNSManifests(cc config.ClusterConfig, hostIP, hostFQDN string) ([]byte, DeploymentName: kconst.CoreDNSDeploymentName, ControlPlaneTaintKey: kconst.LabelNodeRoleControlPlane, Image: image, - Replicas: &replicas, DNSIP: dnsip.String(), } + // in k8s v1.20 "node-role.kubernetes.io/control-plane" label was introduced and "node-role.kubernetes.io/master" was deprecated + // ref: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.20.md#no-really-you-must-read-this-before-you-upgrade + k8sVersion, err := util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion) + if err != nil { + return nil, fmt.Errorf("parsing Kubernetes version %q: %v", cc.KubernetesConfig.KubernetesVersion, err) + } + + if k8sVersion.LT(semver.Version{Major: 1, Minor: 20}) { + params.ControlPlaneTaintKey = kconst.LabelNodeRoleOldControlPlane + } + t := template.Must(template.New("coredns").Parse(toml)) var manifests bytes.Buffer if err = t.Execute(&manifests, params); err != nil { diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index bc143cbceac6..93b221d74426 100755 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -21,7 +21,6 @@ import ( "net" "os" "os/exec" - "path" "regexp" "strconv" "strings" @@ -38,7 +37,6 @@ import ( cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/addons" "k8s.io/minikube/pkg/drivers/kic/oci" - "k8s.io/minikube/pkg/kapi" "k8s.io/minikube/pkg/minikube/bootstrapper" "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil" "k8s.io/minikube/pkg/minikube/bootstrapper/images" @@ -64,7 +62,6 @@ import ( "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/minikube/registry" "k8s.io/minikube/pkg/minikube/style" - "k8s.io/minikube/pkg/minikube/vmpath" "k8s.io/minikube/pkg/network" "k8s.io/minikube/pkg/util" "k8s.io/minikube/pkg/util/retry" @@ -140,24 +137,13 @@ func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo if err != nil { return nil, err } - // configure CoreDNS concurently from primary control-plane node only and only on first node start + // deploy our custom CoreDNS addon concurrently from primary control-plane node only and only on first node start if !starter.PreExists { wg.Add(1) go func() { defer wg.Done() - // inject {"host.minikube.internal": hostIP} record into coredns for primary control-plane node host ip - if hostIP != nil { - if err := addCoreDNSEntry(starter.Runner, constants.HostAlias, hostIP.String(), *starter.Cfg); err != nil { - klog.Warningf("Unable to inject {%q: %s} record into CoreDNS: %v", constants.HostAlias, hostIP.String(), err) - out.Err("Failed to inject host.minikube.internal into CoreDNS, this will limit the pods access to the host IP") - } - } - // scale down CoreDNS from default 2 to 1 replica only for non-ha (non-multi-control plane) cluster and if optimisation is not disabled - if !starter.Cfg.DisableOptimizations && !config.IsHA(*starter.Cfg) { - // deploy our custom CoreDNS addon - if err := dns.DeployCoreDNS(*starter.Cfg, starter.Runner, hostIP.String(), constants.HostAlias); err != nil { - out.FailureT("Unable to deploy CoreDNS: {{.error}}", out.V{"error": err}) - } + if err := dns.DeployCoreDNS(*starter.Cfg, starter.Runner, hostIP.String(), constants.HostAlias); err != nil { + 
out.FailureT("Unable to deploy CoreDNS: {{.error}}", out.V{"error": err}) } }() } @@ -899,57 +885,6 @@ func prepareNone() { } } -// addCoreDNSEntry adds host name and IP record to the DNS by updating CoreDNS's ConfigMap. -// ref: https://coredns.io/plugins/hosts/ -// note: there can be only one 'hosts' block in CoreDNS's ConfigMap (avoid "plugin/hosts: this plugin can only be used once per Server Block" error) -func addCoreDNSEntry(runner command.Runner, name, ip string, cc config.ClusterConfig) error { - kubectl := kapi.KubectlBinaryPath(cc.KubernetesConfig.KubernetesVersion) - kubecfg := path.Join(vmpath.GuestPersistentDir, "kubeconfig") - - // get current coredns configmap via kubectl - get := fmt.Sprintf("sudo %s --kubeconfig=%s -n kube-system get configmap coredns -o yaml", kubectl, kubecfg) - out, err := runner.RunCmd(exec.Command("/bin/bash", "-c", get)) - if err != nil { - klog.Errorf("failed to get current CoreDNS ConfigMap: %v", err) - return err - } - cm := strings.TrimSpace(out.Stdout.String()) - - // check if this specific host entry already exists in coredns configmap, so not to duplicate/override it - host := regexp.MustCompile(fmt.Sprintf(`(?smU)^ *hosts {.*%s.*}`, name)) - if host.MatchString(cm) { - klog.Infof("CoreDNS already contains %q host record, skipping...", name) - return nil - } - - // inject hosts block with host record into coredns configmap - sed := fmt.Sprintf("sed -e '/^ forward . \\/etc\\/resolv.conf.*/i \\ hosts {\\n %s %s\\n fallthrough\\n }'", ip, name) - // check if hosts block already exists in coredns configmap - hosts := regexp.MustCompile(`(?smU)^ *hosts {.*}`) - if hosts.MatchString(cm) { - // inject host record into existing coredns configmap hosts block instead - klog.Info("CoreDNS already contains hosts block, will inject host record there...") - sed = fmt.Sprintf("sed -e '/^ hosts {.*/a \\ %s %s'", ip, name) - } - - // check if logging is already enabled (via log plugin) in coredns configmap, so not to duplicate it - logs := regexp.MustCompile(`(?smU)^ *log *$`) - if !logs.MatchString(cm) { - // inject log plugin into coredns configmap - sed = fmt.Sprintf("%s -e '/^ errors *$/i \\ log'", sed) - } - - // replace coredns configmap via kubectl - replace := fmt.Sprintf("sudo %s --kubeconfig=%s replace -f -", kubectl, kubecfg) - if _, err := runner.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s | %s | %s", get, sed, replace))); err != nil { - klog.Errorf("failed to inject {%q: %s} host record into CoreDNS", name, ip) - return err - } - klog.Infof("{%q: %s} host record injected into CoreDNS's ConfigMap", name, ip) - - return nil -} - // prints a warning to the console against the use of the 'virtualbox' driver, if alternatives are available and healthy func warnVirtualBox() { var altDriverList strings.Builder
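
For reference, a sketch of the hosts stanza that the CoreDNSConfigMap template above renders once DeployCoreDNS substitutes the minikube host record - the same record the addCoreDNSEntry helper removed in this last patch used to splice into the live ConfigMap with sed. The concrete values are illustrative assumptions, not part of the series: 192.168.49.1 is the typical host-side gateway IP for the docker driver (the real value comes from hostIP at start time), and host.minikube.internal is what constants.HostAlias resolves to, as shown by the earlier addCoreDNSEntry call sites:

        hosts {
           192.168.49.1 host.minikube.internal
           fallthrough
        }

CoreDNS answers queries for host.minikube.internal from this block and falls through to the remaining plugins (e.g. forward to /etc/resolv.conf) for everything else, so pods keep resolving the host IP without the ConfigMap having to be patched after startup.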