split other changes into separate prs
prezha committed Apr 23, 2021
1 parent 2b0fedf commit b14789b
Showing 11 changed files with 127 additions and 169 deletions.
23 changes: 22 additions & 1 deletion cmd/minikube/cmd/start_flags.go
@@ -253,14 +253,17 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
klog.Info("no existing cluster config was found, will generate one from the flags ")
cc = generateNewConfigFromFlags(cmd, k8sVersion, drvName)

cnm, err := cni.New(&cc)
cnm, err := cni.New(cc)
if err != nil {
return cc, config.Node{}, errors.Wrap(err, "cni")
}

if _, ok := cnm.(cni.Disabled); !ok {
klog.Infof("Found %q CNI - setting NetworkPlugin=cni", cnm)
cc.KubernetesConfig.NetworkPlugin = "cni"
if err := setCNIConfDir(&cc, cnm); err != nil {
klog.Errorf("unable to set CNI Config Directory: %v", err)
}
}
}

@@ -425,6 +428,24 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName s
return cc
}

// setCNIConfDir sets kubelet's '--cni-conf-dir' flag to custom CNI Config Directory path (same used also by CNI Deployment) to avoid conflicting CNI configs.
// ref: https://github.com/kubernetes/minikube/issues/10984
// Note: currently, this change affects only Kindnet CNI (and all multinodes using it), but it can be easily expanded to other/all CNIs if needed.
func setCNIConfDir(cc *config.ClusterConfig, cnm cni.Manager) error {
if _, kindnet := cnm.(cni.KindNet); kindnet {
// auto-set custom CNI Config Directory, if not user-specified
eo := fmt.Sprintf("kubelet.cni-conf-dir=%s", cni.CustomCNIConfDir)
if !cc.KubernetesConfig.ExtraOptions.Exists(eo) {
klog.Infof("auto-setting extra-config to %q", eo)
if err := cc.KubernetesConfig.ExtraOptions.Set(eo); err != nil {
return fmt.Errorf("failed auto-setting extra-config %q: %v", eo, err)
}
klog.Infof("extra-config set to %q", eo)
}
}
return nil
}

func checkNumaCount(k8sVersion string) {
if viper.GetInt(kvmNUMACount) < 1 || viper.GetInt(kvmNUMACount) > 8 {
exit.Message(reason.Usage, "--kvm-numa-count range is 1-8")
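For context, the setCNIConfDir helper added above only auto-sets the kubelet option when the user has not supplied it. The equivalent can also be passed by hand via --extra-config; the following is a hypothetical invocation for illustration only (the directory value is an assumed stand-in for cni.CustomCNIConfDir, not taken from this diff):

    # illustration only: two-node kindnet cluster with a dedicated CNI config dir for kubelet
    minikube start --nodes=2 --cni=kindnet --extra-config=kubelet.cni-conf-dir=/etc/cni/net.mk
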
23 changes: 4 additions & 19 deletions hack/jenkins/common.sh
@@ -31,7 +31,7 @@ export GOPATH="$HOME/go"
export KUBECONFIG="${TEST_HOME}/kubeconfig"
export PATH=$PATH:"/usr/local/bin/:/usr/local/go/bin/:$GOPATH/bin"

readonly TIMEOUT=${1:-90m}
readonly TIMEOUT=${1:-70m}

if [ "$(uname)" != "Darwin" ]; then
# install lsof for finding none driver procs, psmisc to use pstree in cronjobs
@@ -48,9 +48,6 @@ sudo ./installers/check_install_golang.sh "1.16" "/usr/local" || true
docker system prune --force --volumes || true
docker system df || true

# clean up /tmp
find /tmp -name . -o -prune -exec rm -rf -- {} + >/dev/null 2>&1 || true

echo ">> Starting at $(date)"
echo ""
echo "arch: ${OS_ARCH}"
@@ -166,23 +163,11 @@ if [[ "${zombie_defuncts}" != "" ]]; then
fi

if type -P virsh; then
sudo virsh -c qemu:///system list --all --uuid \
| xargs -I {} sh -c "sudo virsh -c qemu:///system destroy {}; sudo virsh -c qemu:///system undefine {}" \
virsh -c qemu:///system list --all --uuid \
| xargs -I {} sh -c "virsh -c qemu:///system destroy {}; virsh -c qemu:///system undefine {}" \
|| true
echo ">> virsh VM list after clean up (should be empty):"
sudo virsh -c qemu:///system list --all || true

for NET in $( sudo virsh -c qemu:///system net-list --all --name ); do
if [ "${NET}" != "default" ]; then
sudo virsh -c qemu:///system net-destroy "${NET}" || \
sudo virsh -c qemu:///system net-undefine "${NET}" || true
fi
done
echo ">> virsh VM networks list after clean up (should have only 'default'):"
sudo virsh -c qemu:///system net-list --all || true
echo ">> host networks after KVM clean up:"
sudo ip link show || true
echo
virsh -c qemu:///system list --all || true
fi

if type -P vboxmanage; then
23 changes: 11 additions & 12 deletions hack/jenkins/cron/cleanup_and_reboot_Linux.sh
@@ -72,12 +72,13 @@ function cleanup() {
echo -e "\nbefore the cleanup:"
overview
for DOM in $( sudo virsh list --all --name ); do
sudo virsh destroy "${DOM}" || true
if sudo virsh undefine "${DOM}"; then
echo "successfully deleted KVM domain:" "${DOM}"
continue
if sudo virsh destroy "${DOM}"; then
if sudo virsh undefine "${DOM}"; then
echo "successfully deleted KVM domain:" "${DOM}"
continue
fi
echo "unable to delete KVM domain:" "${DOM}"
fi
echo "unable to delete KVM domain:" "${DOM}"
done
#for POOL in $( sudo virsh pool-list --all --name ); do # better, but flag '--name' is not supported for 'virsh pool-list' command on older libvirt versions
for POOL in $( sudo virsh pool-list --all | awk 'NR>2 {print $1}' ); do
@@ -91,10 +92,11 @@ function cleanup() {
done
for NET in $( sudo virsh net-list --all --name ); do
if [ "${NET}" != "default" ]; then
sudo virsh net-destroy "${NET}" || true
if sudo virsh net-undefine "${NET}"; then
echo "successfully deleted KVM network" "${NET}"
continue
if sudo virsh net-destroy "${NET}"; then
if sudo virsh net-undefine "${NET}"; then
echo "successfully deleted KVM network" "${NET}"
continue
fi
fi
echo "unable to delete KVM network" "${NET}"
fi
@@ -111,9 +113,6 @@ done
done
echo -e "\nafter the cleanup:"
overview

# clean up /tmp
find /tmp -name . -o -prune -exec rm -rf -- {} + >/dev/null 2>&1 || true
}

# Give 15m for Linux-specific cleanup
2 changes: 1 addition & 1 deletion pkg/minikube/bootstrapper/bsutil/kubeadm.go
@@ -74,7 +74,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana
return nil, errors.Wrap(err, "generating extra component config for kubeadm")
}

cnm, err := cni.New(&cc)
cnm, err := cni.New(cc)
if err != nil {
return nil, errors.Wrap(err, "cni")
}
4 changes: 1 addition & 3 deletions pkg/minikube/bootstrapper/images/images.go
@@ -141,13 +141,11 @@ func dashboardMetrics(repo string) string {
}

// KindNet returns the image used for kindnet
// ref: https://hub.docker.com/r/kindest/kindnetd/tags
// src: https://github.com/kubernetes-sigs/kind/tree/master/images/kindnetd
func KindNet(repo string) string {
if repo == "" {
repo = "kindest"
}
return path.Join(repo, "kindnetd:v20210326-1e038dc5")
return path.Join(repo, "kindnetd:v20210220-5b7e6d01")
}

// CalicoDaemonSet returns the image used for calicoDaemonSet
65 changes: 30 additions & 35 deletions pkg/minikube/bootstrapper/kubeadm/kubeadm.go
@@ -259,7 +259,6 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error {
}
kw.Close()
wg.Wait()

if err := k.applyCNI(cfg, true); err != nil {
return errors.Wrap(err, "apply cni")
}
@@ -331,7 +330,7 @@ func (k *Bootstrapper) applyCNI(cfg config.ClusterConfig, registerStep ...bool)
regStep = registerStep[0]
}

cnm, err := cni.New(&cfg)
cnm, err := cni.New(cfg)
if err != nil {
return errors.Wrap(err, "cni config")
}
@@ -352,6 +351,12 @@ func (k *Bootstrapper) applyCNI(cfg config.ClusterConfig, registerStep ...bool)
return errors.Wrap(err, "cni apply")
}

if cfg.KubernetesConfig.ContainerRuntime == constants.CRIO {
if err := cruntime.UpdateCRIONet(k.c, cnm.CIDR()); err != nil {
return errors.Wrap(err, "update crio")
}
}

return nil
}

@@ -676,30 +681,6 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error {
}
}

cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c})
if err != nil {
return errors.Wrap(err, "runtime")
}

// We must ensure that the apiserver is healthy before proceeding
if err := kverify.WaitForAPIServerProcess(cr, k, cfg, k.c, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil {
return errors.Wrap(err, "apiserver healthz")
}

if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, client, time.Now(), hostname, port, kconst.DefaultControlPlaneTimeout); err != nil {
return errors.Wrap(err, "apiserver health")
}

// because reboots clear /etc/cni
if err := k.applyCNI(cfg); err != nil {
return errors.Wrap(err, "apply cni")
}

if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil {
return errors.Wrap(err, "system pods")
}

// must be called after applyCNI
if cfg.VerifyComponents[kverify.ExtraKey] {
// after kubelet is restarted (with 'kubeadm init phase kubelet-start' above),
// it appears as to be immediately Ready as well as all kube-system pods (last observed state),
@@ -728,6 +709,29 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error {
}
}

cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c})
if err != nil {
return errors.Wrap(err, "runtime")
}

// We must ensure that the apiserver is healthy before proceeding
if err := kverify.WaitForAPIServerProcess(cr, k, cfg, k.c, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil {
return errors.Wrap(err, "apiserver healthz")
}

if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, client, time.Now(), hostname, port, kconst.DefaultControlPlaneTimeout); err != nil {
return errors.Wrap(err, "apiserver health")
}

// because reboots clear /etc/cni
if err := k.applyCNI(cfg); err != nil {
return errors.Wrap(err, "apply cni")
}

if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil {
return errors.Wrap(err, "system pods")
}

if err := kverify.NodePressure(client); err != nil {
adviseNodePressure(err, cfg.Name, cfg.Driver)
}
@@ -754,15 +758,6 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC
// Join the master by specifying its token
joinCmd = fmt.Sprintf("%s --node-name=%s", joinCmd, config.MachineName(cc, n))

// avoid "Found multiple CRI sockets, please use --cri-socket to select one: /var/run/dockershim.sock, /var/run/crio/crio.sock" error
cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: k.c, Socket: cc.KubernetesConfig.CRISocket})
if err != nil {
return errors.Wrap(err, "runtime")
}
if sp := cr.SocketPath(); sp != "" {
joinCmd = fmt.Sprintf("%s --cri-socket=%s", joinCmd, sp)
}

if _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", joinCmd)); err != nil {
return errors.Wrapf(err, "kubeadm join")
}
