diff --git a/go.mod b/go.mod index 623e095a4a..03038c5b01 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( golang.org/x/tools v0.21.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/apimachinery v0.29.4 - sigs.k8s.io/kind v0.22.0 + sigs.k8s.io/kind v0.23.0 ) require ( diff --git a/go.sum b/go.sum index b7b2880997..e22627baeb 100644 --- a/go.sum +++ b/go.sum @@ -29,7 +29,6 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= @@ -85,7 +84,6 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -188,7 +186,6 @@ github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31 github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -207,8 +204,6 @@ github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491 h1:WGrKdjHtWC67RX96eTkYD2f53NDHhrq/7robWTAfk4s= @@ -217,7 +212,6 @@ github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0V github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= @@ -234,7 +228,6 @@ github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3 github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -255,7 +248,6 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= -github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= @@ -300,7 +292,6 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -406,7 +397,6 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -462,7 +452,6 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -490,8 +479,7 @@ k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/kind v0.22.0 h1:z/+yr/azoOfzsfooqRsPw1wjJlqT/ukXP0ShkHwNlsI= -sigs.k8s.io/kind v0.22.0/go.mod h1:aBlbxg08cauDgZ612shr017/rZwqd7AS563FvpWKPVs= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/kind v0.23.0 h1:8fyDGWbWTeCcCTwA04v4Nfr45KKxbSPH1WO9K+jVrBg= +sigs.k8s.io/kind v0.23.0/go.mod h1:ZQ1iZuJLh3T+O8fzhdi3VWcFTzsdXtNv2ppsHc8JQ7s= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/vendor/modules.txt b/vendor/modules.txt index 350d2d84d9..61d6251ce9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -710,8 +710,8 @@ k8s.io/klog/v2/internal/sloghandler k8s.io/utils/internal/third_party/forked/golang/net k8s.io/utils/net k8s.io/utils/strings/slices -# sigs.k8s.io/kind v0.22.0 -## explicit; go 1.16 +# sigs.k8s.io/kind v0.23.0 +## explicit; go 1.17 sigs.k8s.io/kind/pkg/apis/config/defaults sigs.k8s.io/kind/pkg/apis/config/v1alpha4 sigs.k8s.io/kind/pkg/cluster @@ -734,6 +734,7 @@ sigs.k8s.io/kind/pkg/cluster/internal/logs sigs.k8s.io/kind/pkg/cluster/internal/providers sigs.k8s.io/kind/pkg/cluster/internal/providers/common sigs.k8s.io/kind/pkg/cluster/internal/providers/docker +sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl sigs.k8s.io/kind/pkg/cluster/internal/providers/podman sigs.k8s.io/kind/pkg/cluster/nodes sigs.k8s.io/kind/pkg/cluster/nodeutils diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/defaults/image.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/defaults/image.go index da3864faeb..9d31a9dd26 100644 --- a/vendor/sigs.k8s.io/kind/pkg/apis/config/defaults/image.go +++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/defaults/image.go @@ -18,4 +18,4 @@ 
limitations under the License. package defaults // Image is the default for the Config.Image field, aka the default node image. -const Image = "kindest/node:v1.29.2@sha256:51a1434a5397193442f0be2a297b488b6c919ce8a3931be0ce822606ea5ca245" +const Image = "kindest/node:v1.30.0@sha256:047357ac0cfea04663786a612ba1eaba9702bef25227a794b52890dd8bcd692e" diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/types.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/types.go index 308a6853b8..33acf503fd 100644 --- a/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/types.go +++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/types.go @@ -186,7 +186,7 @@ type Networking struct { // If DisableDefaultCNI is true, kind will not install the default CNI setup. // Instead the user should install their own CNI after creating the cluster. DisableDefaultCNI bool `yaml:"disableDefaultCNI,omitempty" json:"disableDefaultCNI,omitempty"` - // KubeProxyMode defines if kube-proxy should operate in iptables or ipvs mode + // KubeProxyMode defines if kube-proxy should operate in iptables, ipvs or nftables mode // Defaults to 'iptables' mode KubeProxyMode ProxyMode `yaml:"kubeProxyMode,omitempty" json:"kubeProxyMode,omitempty"` // DNSSearch defines the DNS search domain to use for nodes. If not set, this will be inherited from the host. @@ -213,6 +213,8 @@ const ( IPTablesProxyMode ProxyMode = "iptables" // IPVSProxyMode sets ProxyMode to ipvs IPVSProxyMode ProxyMode = "ipvs" + // NFTablesProxyMode sets ProxyMode to nftables + NFTablesProxyMode ProxyMode = "nftables" ) // PatchJSON6902 represents an inline kustomize json 6902 patch diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/config.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/config.go index 6aa1758197..bd431c527c 100644 --- a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/config.go +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/config.go @@ -57,7 +57,7 @@ type ConfigData struct { // The Token for TLS bootstrap Token string - // KubeProxyMode defines the kube-proxy mode between iptables or ipvs + // KubeProxyMode defines the kube-proxy mode between iptables, ipvs or nftables KubeProxyMode string // The subnet used for pods PodSubnet string @@ -285,7 +285,7 @@ evictionHard: {{ range $index, $gate := .SortedFeatureGates }} "{{ (StructuralData $gate.Name) }}": {{ $gate.Value }} {{end}}{{end}} -{{if ne .KubeProxyMode "None"}} +{{if ne .KubeProxyMode "none"}} --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration @@ -302,6 +302,12 @@ conntrack: # Skip setting sysctl value "net.netfilter.nf_conntrack_max" # It is a global variable that affects other namespaces maxPerCore: 0 +# Set sysctl value "net.netfilter.nf_conntrack_tcp_be_liberal" +# for nftables proxy (theoretically for kernels older than 6.1) +# xref: https://github.com/kubernetes/kubernetes/issues/117924 +{{if and (eq .KubeProxyMode "nftables") (not .RootlessProvider)}} + tcpBeLiberal: true +{{end}} {{if .RootlessProvider}} # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established" tcpEstablishedTimeout: 0s @@ -423,7 +429,7 @@ evictionHard: "{{ (StructuralData $gate.Name) }}": {{ $gate.Value }} {{end}}{{end}} {{if .DisableLocalStorageCapacityIsolation}}localStorageCapacityIsolation: false{{end}} -{{if ne .KubeProxyMode "None"}} +{{if ne .KubeProxyMode "none"}} --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration @@ -440,6 +446,12 @@ conntrack: # Skip setting sysctl value 
"net.netfilter.nf_conntrack_max" # It is a global variable that affects other namespaces maxPerCore: 0 +# Set sysctl value "net.netfilter.nf_conntrack_tcp_be_liberal" +# for nftables proxy (theoretically for kernels older than 6.1) +# xref: https://github.com/kubernetes/kubernetes/issues/117924 +{{if and (eq .KubeProxyMode "nftables") (not .RootlessProvider)}} + tcpBeLiberal: true +{{end}} {{if .RootlessProvider}} # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established" tcpEstablishedTimeout: 0s diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provision.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provision.go index 3a6e1a70ce..b2d6bbea23 100644 --- a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provision.go +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provision.go @@ -403,10 +403,7 @@ func generatePortMappings(clusterIPFamily config.ClusterIPFamily, portMappings . } func createContainer(name string, args []string) error { - if err := exec.Command("docker", append([]string{"run", "--name", name}, args...)...).Run(); err != nil { - return err - } - return nil + return exec.Command("docker", append([]string{"run", "--name", name}, args...)...).Run() } func createContainerWithWaitUntilSystemdReachesMultiUserSystem(name string, args []string) error { diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/OWNERS b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/OWNERS new file mode 100644 index 0000000000..71a8b4d7bd --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/OWNERS @@ -0,0 +1,2 @@ +labels: +- area/provider/nerdctl diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/constants.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/constants.go new file mode 100644 index 0000000000..6b77abb011 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/constants.go @@ -0,0 +1,24 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impliep. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nerdctl + +// clusterLabelKey is applied to each "node" container for identification +const clusterLabelKey = "io.x-k8s.kind.cluster" + +// nodeRoleLabelKey is applied to each "node" container for categorization +// of nodes by role +const nodeRoleLabelKey = "io.x-k8s.kind.role" diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/images.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/images.go new file mode 100644 index 0000000000..c090138f72 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/images.go @@ -0,0 +1,91 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nerdctl + +import ( + "fmt" + "strings" + "time" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + "sigs.k8s.io/kind/pkg/log" + + "sigs.k8s.io/kind/pkg/cluster/internal/providers/common" + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/cli" +) + +// ensureNodeImages ensures that the node images used by the create +// configuration are present +func ensureNodeImages(logger log.Logger, status *cli.Status, cfg *config.Cluster, binaryName string) error { + // pull each required image + for _, image := range common.RequiredNodeImages(cfg).List() { + // prints user friendly message + friendlyImageName, image := sanitizeImage(image) + status.Start(fmt.Sprintf("Ensuring node image (%s) 🖼", friendlyImageName)) + if _, err := pullIfNotPresent(logger, image, 4, binaryName); err != nil { + status.End(false) + return err + } + } + return nil +} + +// pullIfNotPresent will pull an image if it is not present locally +// retrying up to retries times +// it returns true if it attempted to pull, and any errors from pulling +func pullIfNotPresent(logger log.Logger, image string, retries int, binaryName string) (pulled bool, err error) { + // TODO(bentheelder): switch most (all) of the logging here to debug level + // once we have configurable log levels + // if this did not return an error, then the image exists locally + cmd := exec.Command(binaryName, "inspect", "--type=image", image) + if err := cmd.Run(); err == nil { + logger.V(1).Infof("Image: %s present locally", image) + return false, nil + } + // otherwise try to pull it + return true, pull(logger, image, retries, binaryName) +} + +// pull pulls an image, retrying up to retries times +func pull(logger log.Logger, image string, retries int, binaryName string) error { + logger.V(1).Infof("Pulling image: %s ...", image) + err := exec.Command(binaryName, "pull", image).Run() + // retry pulling up to retries times if necessary + if err != nil { + for i := 0; i < retries; i++ { + time.Sleep(time.Second * time.Duration(i+1)) + logger.V(1).Infof("Trying again to pull image: %q ... %v", image, err) + // TODO(bentheelder): add some backoff / sleep? + err = exec.Command(binaryName, "pull", image).Run() + if err == nil { + break + } + } + } + return errors.Wrapf(err, "failed to pull image %q", image) +} + +// sanitizeImage is a helper to return human readable image name and +// the docker pullable image name from the provided image +func sanitizeImage(image string) (string, string) { + if strings.Contains(image, "@sha256:") { + return strings.Split(image, "@sha256:")[0], image + } + return image, image +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/network.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/network.go new file mode 100644 index 0000000000..e9a1d21e8b --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/network.go @@ -0,0 +1,187 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nerdctl + +import ( + "crypto/sha1" + "encoding/binary" + "fmt" + "net" + "strconv" + "strings" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" +) + +// This may be overridden by KIND_EXPERIMENTAL_DOCKER_NETWORK env, +// experimentally... +// +// By default currently picking a single network is equivalent to the previous +// behavior *except* that we moved from the default bridge to a user defined +// network because the default bridge is actually special versus any other +// docker network and lacks the embedded DNS +// +// For now this also makes it easier for apps to join the same network, and +// leaves users with complex networking desires to create and manage their own +// networks. +const fixedNetworkName = "kind" + +// ensureNetwork checks if docker network by name exists, if not it creates it +func ensureNetwork(name, binaryName string) error { + // check if network exists already and remove any duplicate networks + exists, err := checkIfNetworkExists(name, binaryName) + if err != nil { + return err + } + + // network already exists, we're good + // TODO: the network might already exist and not have ipv6 ... :| + // discussion: https://github.com/kubernetes-sigs/kind/pull/1508#discussion_r414594198 + if exists { + return nil + } + + subnet := generateULASubnetFromName(name, 0) + mtu := getDefaultNetworkMTU(binaryName) + err = createNetwork(name, subnet, mtu, binaryName) + if err == nil { + // Success! + return nil + } + + // On the first try check if ipv6 fails entirely on this machine + // https://github.com/kubernetes-sigs/kind/issues/1544 + // Otherwise if it's not a pool overlap error, fail + // If it is, make more attempts below + if isIPv6UnavailableError(err) { + // only one attempt, IPAM is automatic in ipv4 only + return createNetwork(name, "", mtu, binaryName) + } + if isPoolOverlapError(err) { + // pool overlap suggests perhaps another process created the network + // check if network exists already and remove any duplicate networks + exists, err := checkIfNetworkExists(name, binaryName) + if err != nil { + return err + } + if exists { + return nil + } + // otherwise we'll start trying with different subnets + } else { + // unknown error ... + return err + } + + // keep trying for ipv6 subnets + const maxAttempts = 5 + for attempt := int32(1); attempt < maxAttempts; attempt++ { + subnet := generateULASubnetFromName(name, attempt) + err = createNetwork(name, subnet, mtu, binaryName) + if err == nil { + // success! + return nil + } + if isPoolOverlapError(err) { + // pool overlap suggests perhaps another process created the network + // check if network exists already and remove any duplicate networks + exists, err := checkIfNetworkExists(name, binaryName) + if err != nil { + return err + } + if exists { + return nil + } + // otherwise we'll try again + continue + } + // unknown error ... 
+ return err + } + return errors.New("exhausted attempts trying to find a non-overlapping subnet") +} + +func createNetwork(name, ipv6Subnet string, mtu int, binaryName string) error { + args := []string{"network", "create", "-d=bridge"} + // TODO: Not supported in nerdctl yet + // "-o", "com.docker.network.bridge.enable_ip_masquerade=true", + if mtu > 0 { + args = append(args, "-o", fmt.Sprintf("com.docker.network.driver.mtu=%d", mtu)) + } + if ipv6Subnet != "" { + args = append(args, "--ipv6", "--subnet", ipv6Subnet) + } + args = append(args, name) + return exec.Command(binaryName, args...).Run() +} + +// getDefaultNetworkMTU obtains the MTU from the docker default network +func getDefaultNetworkMTU(binaryName string) int { + cmd := exec.Command(binaryName, "network", "inspect", "bridge", + "-f", `{{ index .Options "com.docker.network.driver.mtu" }}`) + lines, err := exec.OutputLines(cmd) + if err != nil || len(lines) != 1 { + return 0 + } + mtu, err := strconv.Atoi(lines[0]) + if err != nil { + return 0 + } + return mtu +} + +func checkIfNetworkExists(name, binaryName string) (bool, error) { + out, err := exec.Output(exec.Command( + binaryName, "network", "inspect", + name, "--format={{.Name}}", + )) + if err != nil { + return false, nil + } + return strings.HasPrefix(string(out), name), err +} + +func isIPv6UnavailableError(err error) bool { + rerr := exec.RunErrorForError(err) + return rerr != nil && strings.HasPrefix(string(rerr.Output), "Error response from daemon: Cannot read IPv6 setup for bridge") +} + +func isPoolOverlapError(err error) bool { + rerr := exec.RunErrorForError(err) + return rerr != nil && strings.HasPrefix(string(rerr.Output), "Error response from daemon: Pool overlaps with other one on this address space") || strings.Contains(string(rerr.Output), "networks have overlapping") +} + +// generateULASubnetFromName generate an IPv6 subnet based on the +// name and Nth probing attempt +func generateULASubnetFromName(name string, attempt int32) string { + ip := make([]byte, 16) + ip[0] = 0xfc + ip[1] = 0x00 + h := sha1.New() + _, _ = h.Write([]byte(name)) + _ = binary.Write(h, binary.LittleEndian, attempt) + bs := h.Sum(nil) + for i := 2; i < 8; i++ { + ip[i] = bs[i] + } + subnet := &net.IPNet{ + IP: net.IP(ip), + Mask: net.CIDRMask(64, 128), + } + return subnet.String() +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/node.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/node.go new file mode 100644 index 0000000000..a6a64e5aee --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/node.go @@ -0,0 +1,175 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impliep. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package nerdctl + +import ( + "context" + "fmt" + "io" + "strings" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" +) + +// nodes.Node implementation for the docker provider +type node struct { + name string + binaryName string +} + +func (n *node) String() string { + return n.name +} + +func (n *node) Role() (string, error) { + cmd := exec.Command(n.binaryName, "inspect", + "--format", fmt.Sprintf(`{{ index .Config.Labels "%s"}}`, nodeRoleLabelKey), + n.name, + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", errors.Wrap(err, "failed to get role for node") + } + if len(lines) != 1 { + return "", errors.Errorf("failed to get role for node: output lines %d != 1", len(lines)) + } + return lines[0], nil +} + +func (n *node) IP() (ipv4 string, ipv6 string, err error) { + // retrieve the IP address of the node using docker inspect + cmd := exec.Command(n.binaryName, "inspect", + "-f", "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}", + n.name, // ... against the "node" container + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", "", errors.Wrap(err, "failed to get container details") + } + if len(lines) != 1 { + return "", "", errors.Errorf("file should only be one line, got %d lines", len(lines)) + } + ips := strings.Split(lines[0], ",") + if len(ips) != 2 { + return "", "", errors.Errorf("container addresses should have 2 values, got %d values", len(ips)) + } + return ips[0], ips[1], nil +} + +func (n *node) Command(command string, args ...string) exec.Cmd { + return &nodeCmd{ + binaryName: n.binaryName, + nameOrID: n.name, + command: command, + args: args, + } +} + +func (n *node) CommandContext(ctx context.Context, command string, args ...string) exec.Cmd { + return &nodeCmd{ + binaryName: n.binaryName, + nameOrID: n.name, + command: command, + args: args, + ctx: ctx, + } +} + +// nodeCmd implements exec.Cmd for docker nodes +type nodeCmd struct { + binaryName string + nameOrID string // the container name or ID + command string + args []string + env []string + stdin io.Reader + stdout io.Writer + stderr io.Writer + ctx context.Context +} + +func (c *nodeCmd) Run() error { + args := []string{ + "exec", + // run with privileges so we can remount etc.. + // this might not make sense in the most general sense, but it is + // important to many kind commands + "--privileged", + } + if c.stdin != nil { + args = append(args, + "-i", // interactive so we can supply input + ) + } + // set env + for _, env := range c.env { + args = append(args, "-e", env) + } + // specify the container and command, after this everything will be + // args the command in the container rather than to docker + args = append( + args, + c.nameOrID, // ... against the container + c.command, // with the command specified + ) + args = append( + args, + // finally, with the caller args + c.args..., + ) + var cmd exec.Cmd + if c.ctx != nil { + cmd = exec.CommandContext(c.ctx, c.binaryName, args...) + } else { + cmd = exec.Command(c.binaryName, args...) 
+ } + if c.stdin != nil { + cmd.SetStdin(c.stdin) + } + if c.stderr != nil { + cmd.SetStderr(c.stderr) + } + if c.stdout != nil { + cmd.SetStdout(c.stdout) + } + return cmd.Run() +} + +func (c *nodeCmd) SetEnv(env ...string) exec.Cmd { + c.env = env + return c +} + +func (c *nodeCmd) SetStdin(r io.Reader) exec.Cmd { + c.stdin = r + return c +} + +func (c *nodeCmd) SetStdout(w io.Writer) exec.Cmd { + c.stdout = w + return c +} + +func (c *nodeCmd) SetStderr(w io.Writer) exec.Cmd { + c.stderr = w + return c +} + +func (n *node) SerialLogs(w io.Writer) error { + return exec.Command(n.binaryName, "logs", n.name).SetStdout(w).SetStderr(w).Run() +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/provider.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/provider.go new file mode 100644 index 0000000000..05bba6b18d --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/provider.go @@ -0,0 +1,392 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impliep. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nerdctl + +import ( + "encoding/csv" + "encoding/json" + "fmt" + "net" + osexec "os/exec" + "path/filepath" + "strings" + + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + "sigs.k8s.io/kind/pkg/log" + + internallogs "sigs.k8s.io/kind/pkg/cluster/internal/logs" + "sigs.k8s.io/kind/pkg/cluster/internal/providers" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/common" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/cli" + "sigs.k8s.io/kind/pkg/internal/sets" +) + +// NewProvider returns a new provider based on executing `nerdctl ...` +func NewProvider(logger log.Logger, binaryName string) providers.Provider { + // if binaryName is unset, do a lookup; we may be here via a + // library call to provider.DetectNodeProvider(), which returns + // true from nerdctl.IsAvailable() by checking for both finch + // and nerdctl. If we don't redo the lookup here, then a finch + // install that triggered IsAvailable() to be true would fail + // to be used if we default to nerdctl when unset. + if binaryName == "" { + // default to "nerdctl"; but look for "finch" if + // nerctl binary lookup fails + binaryName = "nerdctl" + if _, err := osexec.LookPath("nerdctl"); err != nil { + if _, err := osexec.LookPath("finch"); err == nil { + binaryName = "finch" + } + } + } + return &provider{ + logger: logger, + binaryName: binaryName, + } +} + +// Provider implements provider.Provider +// see NewProvider +type provider struct { + logger log.Logger + binaryName string + info *providers.ProviderInfo +} + +// String implements fmt.Stringer +// NOTE: the value of this should not currently be relied upon for anything! 
+// This is only used for setting the Node's providerID +func (p *provider) String() string { + return "nerdctl" +} + +func (p *provider) Binary() string { + return p.binaryName +} + +// Provision is part of the providers.Provider interface +func (p *provider) Provision(status *cli.Status, cfg *config.Cluster) (err error) { + // TODO: validate cfg + // ensure node images are pulled before actually provisioning + if err := ensureNodeImages(p.logger, status, cfg, p.Binary()); err != nil { + return err + } + + // ensure the pre-requisite network exists + if err := ensureNetwork(fixedNetworkName, p.Binary()); err != nil { + return errors.Wrap(err, "failed to ensure nerdctl network") + } + + // actually provision the cluster + icons := strings.Repeat("📦 ", len(cfg.Nodes)) + status.Start(fmt.Sprintf("Preparing nodes %s", icons)) + defer func() { status.End(err == nil) }() + + // plan creating the containers + createContainerFuncs, err := planCreation(cfg, fixedNetworkName, p.Binary()) + if err != nil { + return err + } + + // actually create nodes + // TODO: remove once nerdctl handles concurrency better + // xref: https://github.com/containerd/nerdctl/issues/2908 + for _, f := range createContainerFuncs { + if err := f(); err != nil { + return err + } + } + return nil +} + +// ListClusters is part of the providers.Provider interface +func (p *provider) ListClusters() ([]string, error) { + cmd := exec.Command(p.Binary(), + "ps", + "-a", // show stopped nodes + // filter for nodes with the cluster label + "--filter", "label="+clusterLabelKey, + // format to include the cluster name + "--format", fmt.Sprintf(`{{index .Labels "%s"}}`, clusterLabelKey), + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return nil, errors.Wrap(err, "failed to list clusters") + } + return sets.NewString(lines...).List(), nil +} + +// ListNodes is part of the providers.Provider interface +func (p *provider) ListNodes(cluster string) ([]nodes.Node, error) { + cmd := exec.Command(p.Binary(), + "ps", + "-a", // show stopped nodes + // filter for nodes with the cluster label + "--filter", fmt.Sprintf("label=%s=%s", clusterLabelKey, cluster), + // format to include the cluster name + "--format", `{{.Names}}`, + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return nil, errors.Wrap(err, "failed to list nodes") + } + length := len(lines) + // convert names to node handles + ret := make([]nodes.Node, 0, length) + for _, name := range lines { + if name != "" { + ret = append(ret, p.node(name)) + } + } + return ret, nil +} + +// DeleteNodes is part of the providers.Provider interface +func (p *provider) DeleteNodes(n []nodes.Node) error { + if len(n) == 0 { + return nil + } + argsNoRestart := make([]string, 0, len(n)+2) + argsNoRestart = append(argsNoRestart, + "update", + "--restart=no", + ) + argsStop := make([]string, 0, len(n)+1) + argsStop = append(argsStop, "stop") + argsWait := make([]string, 0, len(n)+1) + argsWait = append(argsWait, "wait") + + argsRm := make([]string, 0, len(n)+3) // allocate once + argsRm = append(argsRm, + "rm", + "-f", + "-v", // delete volumes + ) + for _, node := range n { + argsRm = append(argsRm, node.String()) + argsStop = append(argsStop, node.String()) + argsWait = append(argsWait, node.String()) + argsNoRestart = append(argsNoRestart, node.String()) + } + if err := exec.Command(p.Binary(), argsNoRestart...).Run(); err != nil { + return errors.Wrap(err, "failed to update restart policy to 'no'") + } + if err := exec.Command(p.Binary(), argsStop...).Run(); err 
!= nil { + return errors.Wrap(err, "failed to stop nodes") + } + if err := exec.Command(p.Binary(), argsWait...).Run(); err != nil { + return errors.Wrap(err, "failed to wait for node exit") + } + if err := exec.Command(p.Binary(), argsRm...).Run(); err != nil { + return errors.Wrap(err, "failed to delete nodes") + } + return nil +} + +// GetAPIServerEndpoint is part of the providers.Provider interface +func (p *provider) GetAPIServerEndpoint(cluster string) (string, error) { + // locate the node that hosts this + allNodes, err := p.ListNodes(cluster) + if err != nil { + return "", errors.Wrap(err, "failed to list nodes") + } + n, err := nodeutils.APIServerEndpointNode(allNodes) + if err != nil { + return "", errors.Wrap(err, "failed to get api server endpoint") + } + + // if the 'desktop.docker.io/ports//tcp' label is present, + // defer to its value for the api server endpoint + // + // For example: + // "Labels": { + // "desktop.docker.io/ports/6443/tcp": "10.0.1.7:6443", + // } + cmd := exec.Command( + p.Binary(), "inspect", + "--format", fmt.Sprintf( + "{{ index .Config.Labels \"desktop.docker.io/ports/%d/tcp\" }}", common.APIServerInternalPort, + ), + n.String(), + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", errors.Wrap(err, "failed to get api server port") + } + if len(lines) == 1 && lines[0] != "" { + return lines[0], nil + } + + // else, retrieve the specific port mapping via NetworkSettings.Ports + cmd = exec.Command( + p.Binary(), "inspect", + "--format", fmt.Sprintf( + "{{ with (index (index .NetworkSettings.Ports \"%d/tcp\") 0) }}{{ printf \"%%s\t%%s\" .HostIp .HostPort }}{{ end }}", common.APIServerInternalPort, + ), + n.String(), + ) + lines, err = exec.OutputLines(cmd) + if err != nil { + return "", errors.Wrap(err, "failed to get api server port") + } + if len(lines) != 1 { + return "", errors.Errorf("network details should only be one line, got %d lines", len(lines)) + } + parts := strings.Split(lines[0], "\t") + if len(parts) != 2 { + return "", errors.Errorf("network details should only be two parts, got %d", len(parts)) + } + + // join host and port + return net.JoinHostPort(parts[0], parts[1]), nil +} + +// GetAPIServerInternalEndpoint is part of the providers.Provider interface +func (p *provider) GetAPIServerInternalEndpoint(cluster string) (string, error) { + // locate the node that hosts this + allNodes, err := p.ListNodes(cluster) + if err != nil { + return "", errors.Wrap(err, "failed to list nodes") + } + n, err := nodeutils.APIServerEndpointNode(allNodes) + if err != nil { + return "", errors.Wrap(err, "failed to get api server endpoint") + } + // NOTE: we're using the nodes's hostnames which are their names + return net.JoinHostPort(n.String(), fmt.Sprintf("%d", common.APIServerInternalPort)), nil +} + +// node returns a new node handle for this provider +func (p *provider) node(name string) nodes.Node { + return &node{ + binaryName: p.binaryName, + name: name, + } +} + +// CollectLogs will populate dir with cluster logs and other debug files +func (p *provider) CollectLogs(dir string, nodes []nodes.Node) error { + execToPathFn := func(cmd exec.Cmd, path string) func() error { + return func() error { + f, err := common.FileOnHost(path) + if err != nil { + return err + } + defer f.Close() + return cmd.SetStdout(f).SetStderr(f).Run() + } + } + // construct a slice of methods to collect logs + fns := []func() error{ + // record info about the host nerdctl + execToPathFn( + exec.Command(p.Binary(), "info"), + filepath.Join(dir, 
"docker-info.txt"), + ), + } + + // collect /var/log for each node and plan collecting more logs + var errs []error + for _, n := range nodes { + node := n // https://golang.org/doc/faq#closures_and_goroutines + name := node.String() + path := filepath.Join(dir, name) + if err := internallogs.DumpDir(p.logger, node, "/var/log", path); err != nil { + errs = append(errs, err) + } + + fns = append(fns, + func() error { return common.CollectLogs(node, path) }, + execToPathFn(exec.Command(p.Binary(), "inspect", name), filepath.Join(path, "inspect.json")), + func() error { + f, err := common.FileOnHost(filepath.Join(path, "serial.log")) + if err != nil { + return err + } + defer f.Close() + return node.SerialLogs(f) + }, + ) + } + + // run and collect up all errors + errs = append(errs, errors.AggregateConcurrent(fns)) + return errors.NewAggregate(errs) +} + +// Info returns the provider info. +// The info is cached on the first time of the execution. +func (p *provider) Info() (*providers.ProviderInfo, error) { + var err error + if p.info == nil { + p.info, err = info(p.Binary()) + } + return p.info, err +} + +// dockerInfo corresponds to `docker info --format '{{json .}}'` +type dockerInfo struct { + CgroupDriver string `json:"CgroupDriver"` // "systemd", "cgroupfs", "none" + CgroupVersion string `json:"CgroupVersion"` // e.g. "2" + MemoryLimit bool `json:"MemoryLimit"` + PidsLimit bool `json:"PidsLimit"` + CPUShares bool `json:"CPUShares"` + SecurityOptions []string `json:"SecurityOptions"` +} + +func info(binaryName string) (*providers.ProviderInfo, error) { + cmd := exec.Command(binaryName, "info", "--format", "{{json .}}") + out, err := exec.Output(cmd) + if err != nil { + return nil, errors.Wrap(err, "failed to get nerdctl info") + } + var dInfo dockerInfo + if err := json.Unmarshal(out, &dInfo); err != nil { + return nil, err + } + info := providers.ProviderInfo{ + Cgroup2: dInfo.CgroupVersion == "2", + } + // When CgroupDriver == "none", the MemoryLimit/PidsLimit/CPUShares + // values are meaningless and need to be considered false. + // https://github.com/moby/moby/issues/42151 + if dInfo.CgroupDriver != "none" { + info.SupportsMemoryLimit = dInfo.MemoryLimit + info.SupportsPidsLimit = dInfo.PidsLimit + info.SupportsCPUShares = dInfo.CPUShares + } + for _, o := range dInfo.SecurityOptions { + // o is like "name=seccomp,profile=default", or "name=rootless", + csvReader := csv.NewReader(strings.NewReader(o)) + sliceSlice, err := csvReader.ReadAll() + if err != nil { + return nil, err + } + for _, f := range sliceSlice { + for _, ff := range f { + if ff == "name=rootless" { + info.Rootless = true + } + } + } + } + return &info, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/provision.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/provision.go new file mode 100644 index 0000000000..d754b38d03 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/provision.go @@ -0,0 +1,388 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nerdctl + +import ( + "context" + "fmt" + "net" + "path/filepath" + "strings" + "time" + + "sigs.k8s.io/kind/pkg/cluster/constants" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + "sigs.k8s.io/kind/pkg/fs" + + "sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/common" + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +// planCreation creates a slice of funcs that will create the containers +func planCreation(cfg *config.Cluster, networkName, binaryName string) (createContainerFuncs []func() error, err error) { + // we need to know all the names for NO_PROXY + // compute the names first before any actual node details + nodeNamer := common.MakeNodeNamer(cfg.Name) + names := make([]string, len(cfg.Nodes)) + for i, node := range cfg.Nodes { + name := nodeNamer(string(node.Role)) // name the node + names[i] = name + } + haveLoadbalancer := config.ClusterHasImplicitLoadBalancer(cfg) + if haveLoadbalancer { + names = append(names, nodeNamer(constants.ExternalLoadBalancerNodeRoleValue)) + } + + // these apply to all container creation + genericArgs, err := commonArgs(cfg.Name, cfg, networkName, names, binaryName) + if err != nil { + return nil, err + } + + // only the external LB should reflect the port if we have multiple control planes + apiServerPort := cfg.Networking.APIServerPort + apiServerAddress := cfg.Networking.APIServerAddress + if haveLoadbalancer { + // TODO: picking ports locally is less than ideal with remote docker + // but this is supposed to be an implementation detail and NOT picking + // them breaks host reboot ... + // For now remote docker + multi control plane is not supported + apiServerPort = 0 // replaced with random ports + apiServerAddress = "127.0.0.1" // only the LB needs to be non-local + // only for IPv6 only clusters + if cfg.Networking.IPFamily == config.IPv6Family { + apiServerAddress = "::1" // only the LB needs to be non-local + } + // plan loadbalancer node + name := names[len(names)-1] + createContainerFuncs = append(createContainerFuncs, func() error { + args, err := runArgsForLoadBalancer(cfg, name, genericArgs) + if err != nil { + return err + } + return createContainer(name, args, binaryName) + }) + } + + // plan normal nodes + for i, node := range cfg.Nodes { + node := node.DeepCopy() // copy so we can modify + name := names[i] + + // fixup relative paths, docker can only handle absolute paths + for m := range node.ExtraMounts { + hostPath := node.ExtraMounts[m].HostPath + if !fs.IsAbs(hostPath) { + absHostPath, err := filepath.Abs(hostPath) + if err != nil { + return nil, errors.Wrapf(err, "unable to resolve absolute path for hostPath: %q", hostPath) + } + node.ExtraMounts[m].HostPath = absHostPath + } + } + + // plan actual creation based on role + switch node.Role { + case config.ControlPlaneRole: + createContainerFuncs = append(createContainerFuncs, func() error { + node.ExtraPortMappings = append(node.ExtraPortMappings, + config.PortMapping{ + ListenAddress: apiServerAddress, + HostPort: apiServerPort, + ContainerPort: common.APIServerInternalPort, + }, + ) + args, err := runArgsForNode(node, cfg.Networking.IPFamily, name, genericArgs) + if err != nil { + return err + } + return createContainerWithWaitUntilSystemdReachesMultiUserSystem(name, args, binaryName) + }) + case config.WorkerRole: + createContainerFuncs = append(createContainerFuncs, func() error { + args, 
err := runArgsForNode(node, cfg.Networking.IPFamily, name, genericArgs) + if err != nil { + return err + } + return createContainerWithWaitUntilSystemdReachesMultiUserSystem(name, args, binaryName) + }) + default: + return nil, errors.Errorf("unknown node role: %q", node.Role) + } + } + return createContainerFuncs, nil +} + +// commonArgs computes static arguments that apply to all containers +func commonArgs(cluster string, cfg *config.Cluster, networkName string, nodeNames []string, binaryName string) ([]string, error) { + // standard arguments all nodes containers need, computed once + args := []string{ + "--detach", // run the container detached + "--tty", // allocate a tty for entrypoint logs + // label the node with the cluster ID + "--label", fmt.Sprintf("%s=%s", clusterLabelKey, cluster), + // user a user defined network so we get embedded DNS + "--net", networkName, + // containerd supports the following restart modes: + // - no + // - on-failure[:max-retries] + // - unless-stopped + // - always + // + // What we desire is: + // - restart on host / container runtime reboot + // - don't restart for any other reason + // + "--restart=on-failure:1", + // this can be enabled by default in docker daemon.json, so we explicitly + // disable it, we want our entrypoint to be PID1, not docker-init / tini + "--init=false", + } + + // enable IPv6 if necessary + if config.ClusterHasIPv6(cfg) { + args = append(args, "--sysctl=net.ipv6.conf.all.disable_ipv6=0", "--sysctl=net.ipv6.conf.all.forwarding=1") + } + + // pass proxy environment variables + proxyEnv, err := getProxyEnv(cfg, networkName, nodeNames, binaryName) + if err != nil { + return nil, errors.Wrap(err, "proxy setup error") + } + for key, val := range proxyEnv { + args = append(args, "-e", fmt.Sprintf("%s=%s", key, val)) + } + + // enable /dev/fuse explicitly for fuse-overlayfs + // (Rootless Docker does not automatically mount /dev/fuse with --privileged) + if mountFuse(binaryName) { + args = append(args, "--device", "/dev/fuse") + } + + if cfg.Networking.DNSSearch != nil { + args = append(args, "-e", "KIND_DNS_SEARCH="+strings.Join(*cfg.Networking.DNSSearch, " ")) + } + + return args, nil +} + +func runArgsForNode(node *config.Node, clusterIPFamily config.ClusterIPFamily, name string, args []string) ([]string, error) { + args = append([]string{ + "--hostname", name, // make hostname match container name + // label the node with the role ID + "--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, node.Role), + // running containers in a container requires privileged + // NOTE: we could try to replicate this with --cap-add, and use less + // privileges, but this flag also changes some mounts that are necessary + // including some ones docker would otherwise do by default. + // for now this is what we want. in the future we may revisit this. + "--privileged", + "--security-opt", "seccomp=unconfined", // also ignore seccomp + "--security-opt", "apparmor=unconfined", // also ignore apparmor + // runtime temporary storage + "--tmpfs", "/tmp", // various things depend on working /tmp + "--tmpfs", "/run", // systemd wants a writable /run + // runtime persistent storage + // this ensures that E.G. pods, logs etc. are not on the container + // filesystem, which is not only better for performance, but allows + // running kind in kind for "party tricks" + // (please don't depend on doing this though!) 
+ "--volume", "/var", + // some k8s things want to read /lib/modules + "--volume", "/lib/modules:/lib/modules:ro", + // propagate KIND_EXPERIMENTAL_CONTAINERD_SNAPSHOTTER to the entrypoint script + "-e", "KIND_EXPERIMENTAL_CONTAINERD_SNAPSHOTTER", + }, + args..., + ) + + // convert mounts and port mappings to container run args + args = append(args, generateMountBindings(node.ExtraMounts...)...) + mappingArgs, err := generatePortMappings(clusterIPFamily, node.ExtraPortMappings...) + if err != nil { + return nil, err + } + args = append(args, mappingArgs...) + + switch node.Role { + case config.ControlPlaneRole: + args = append(args, "-e", "KUBECONFIG=/etc/kubernetes/admin.conf") + } + + // finally, specify the image to run + return append(args, node.Image), nil +} + +func runArgsForLoadBalancer(cfg *config.Cluster, name string, args []string) ([]string, error) { + args = append([]string{ + "--hostname", name, // make hostname match container name + // label the node with the role ID + "--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, constants.ExternalLoadBalancerNodeRoleValue), + }, + args..., + ) + + // load balancer port mapping + mappingArgs, err := generatePortMappings(cfg.Networking.IPFamily, + config.PortMapping{ + ListenAddress: cfg.Networking.APIServerAddress, + HostPort: cfg.Networking.APIServerPort, + ContainerPort: common.APIServerInternalPort, + }, + ) + if err != nil { + return nil, err + } + args = append(args, mappingArgs...) + + // finally, specify the image to run + return append(args, loadbalancer.Image), nil +} + +func getProxyEnv(cfg *config.Cluster, networkName string, nodeNames []string, binaryName string) (map[string]string, error) { + envs := common.GetProxyEnvs(cfg) + // Specifically add the docker network subnets to NO_PROXY if we are using a proxy + if len(envs) > 0 { + subnets, err := getSubnets(networkName, binaryName) + if err != nil { + return nil, err + } + + noProxyList := append(subnets, envs[common.NOProxy]) + noProxyList = append(noProxyList, nodeNames...) + // Add pod and service dns names to no_proxy to allow in cluster + // Note: this is best effort based on the default CoreDNS spec + // https://github.com/kubernetes/dns/blob/master/docs/specification.md + // Any user created pod/service hostnames, namespaces, custom DNS services + // are expected to be no-proxied by the user explicitly. + noProxyList = append(noProxyList, ".svc", ".svc.cluster", ".svc.cluster.local") + noProxyJoined := strings.Join(noProxyList, ",") + envs[common.NOProxy] = noProxyJoined + envs[strings.ToLower(common.NOProxy)] = noProxyJoined + } + return envs, nil +} + +func getSubnets(networkName, binaryName string) ([]string, error) { + format := `{{range (index (index . "IPAM") "Config")}}{{index . 
"Subnet"}} {{end}}` + cmd := exec.Command(binaryName, "network", "inspect", "-f", format, networkName) + lines, err := exec.OutputLines(cmd) + if err != nil { + return nil, errors.Wrap(err, "failed to get subnets") + } + return strings.Split(strings.TrimSpace(lines[0]), " "), nil +} + +// generateMountBindings converts the mount list to a list of args for docker +// ':[:options]', where 'options' +// is a comma-separated list of the following strings: +// 'ro', if the path is read only +// 'Z', if the volume requires SELinux relabeling +func generateMountBindings(mounts ...config.Mount) []string { + args := make([]string, 0, len(mounts)) + for _, m := range mounts { + bind := fmt.Sprintf("%s:%s", m.HostPath, m.ContainerPath) + var attrs []string + if m.Readonly { + attrs = append(attrs, "ro") + } + // Only request relabeling if the pod provides an SELinux context. If the pod + // does not provide an SELinux context relabeling will label the volume with + // the container's randomly allocated MCS label. This would restrict access + // to the volume to the container which mounts it first. + if m.SelinuxRelabel { + attrs = append(attrs, "Z") + } + switch m.Propagation { + case config.MountPropagationNone: + // noop, private is default + case config.MountPropagationBidirectional: + attrs = append(attrs, "rshared") + case config.MountPropagationHostToContainer: + attrs = append(attrs, "rslave") + default: // Falls back to "private" + } + if len(attrs) > 0 { + bind = fmt.Sprintf("%s:%s", bind, strings.Join(attrs, ",")) + } + args = append(args, fmt.Sprintf("--volume=%s", bind)) + } + return args +} + +// generatePortMappings converts the portMappings list to a list of args for docker +func generatePortMappings(clusterIPFamily config.ClusterIPFamily, portMappings ...config.PortMapping) ([]string, error) { + args := make([]string, 0, len(portMappings)) + for _, pm := range portMappings { + // do provider internal defaulting + // in a future API revision we will handle this at the API level and remove this + if pm.ListenAddress == "" { + switch clusterIPFamily { + case config.IPv4Family, config.DualStackFamily: + pm.ListenAddress = "0.0.0.0" // this is the docker default anyhow + case config.IPv6Family: + pm.ListenAddress = "::" + default: + return nil, errors.Errorf("unknown cluster IP family: %v", clusterIPFamily) + } + } + if string(pm.Protocol) == "" { + pm.Protocol = config.PortMappingProtocolTCP // TCP is the default + } + + // validate that the provider can handle this binding + switch pm.Protocol { + case config.PortMappingProtocolTCP: + case config.PortMappingProtocolUDP: + case config.PortMappingProtocolSCTP: + default: + return nil, errors.Errorf("unknown port mapping protocol: %v", pm.Protocol) + } + + // get a random port if necessary (port = 0) + hostPort, releaseHostPortFn, err := common.PortOrGetFreePort(pm.HostPort, pm.ListenAddress) + if err != nil { + return nil, errors.Wrap(err, "failed to get random host port for port mapping") + } + if releaseHostPortFn != nil { + defer releaseHostPortFn() + } + + // generate the actual mapping arg + protocol := string(pm.Protocol) + hostPortBinding := net.JoinHostPort(pm.ListenAddress, fmt.Sprintf("%d", hostPort)) + args = append(args, fmt.Sprintf("--publish=%s:%d/%s", hostPortBinding, pm.ContainerPort, protocol)) + } + return args, nil +} + +func createContainer(name string, args []string, binaryName string) error { + return exec.Command(binaryName, append([]string{"run", "--name", name}, args...)...).Run() +} + +func 
+	if err := exec.Command(binaryName, append([]string{"run", "--name", name}, args...)...).Run(); err != nil {
+		return err
+	}
+
+	logCtx, logCancel := context.WithTimeout(context.Background(), 30*time.Second)
+	logCmd := exec.CommandContext(logCtx, binaryName, "logs", "-f", name)
+	defer logCancel()
+	return common.WaitUntilLogRegexpMatches(logCtx, logCmd, common.NodeReachedCgroupsReadyRegexp())
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/util.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/util.go
new file mode 100644
index 0000000000..6281bbf13c
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/util.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nerdctl
+
+import (
+	"strings"
+
+	"sigs.k8s.io/kind/pkg/exec"
+)
+
+// IsAvailable checks if nerdctl (or finch) is available in the system
+func IsAvailable() bool {
+	cmd := exec.Command("nerdctl", "-v")
+	lines, err := exec.OutputLines(cmd)
+	if err != nil || len(lines) != 1 {
+		// check finch
+		cmd = exec.Command("finch", "-v")
+		lines, err = exec.OutputLines(cmd)
+		if err != nil || len(lines) != 1 {
+			return false
+		}
+		return strings.HasPrefix(lines[0], "finch version")
+	}
+	return strings.HasPrefix(lines[0], "nerdctl version")
+}
+
+// rootless: use fuse-overlayfs by default
+// https://github.com/kubernetes-sigs/kind/issues/2275
+func mountFuse(binaryName string) bool {
+	i, err := info(binaryName)
+	if err != nil {
+		return false
+	}
+	if i != nil && i.Rootless {
+		return true
+	}
+	return false
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provision.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provision.go
index 5ed1c6b264..5e15707d6f 100644
--- a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provision.go
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provision.go
@@ -421,10 +421,7 @@ func generatePortMappings(clusterIPFamily config.ClusterIPFamily, portMappings .
 }
 
 func createContainer(name string, args []string) error {
-	if err := exec.Command("podman", append([]string{"run", "--name", name}, args...)...).Run(); err != nil {
-		return err
-	}
-	return nil
+	return exec.Command("podman", append([]string{"run", "--name", name}, args...)...).Run()
 }
 
 func createContainerWithWaitUntilSystemdReachesMultiUserSystem(name string, args []string) error {
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/provider.go b/vendor/sigs.k8s.io/kind/pkg/cluster/provider.go
index 3cff174787..f5c68e426f 100644
--- a/vendor/sigs.k8s.io/kind/pkg/cluster/provider.go
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/provider.go
@@ -34,6 +34,7 @@ import (
 	"sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig"
 	internalproviders "sigs.k8s.io/kind/pkg/cluster/internal/providers"
 	"sigs.k8s.io/kind/pkg/cluster/internal/providers/docker"
+	"sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl"
 	"sigs.k8s.io/kind/pkg/cluster/internal/providers/podman"
 )
 
@@ -102,8 +103,8 @@ var NoNodeProviderDetectedError = errors.NewWithoutStack("failed to detect any s
 // Pass the returned ProviderOption to NewProvider to pass the auto-detect Docker
 // or Podman option explicitly (in the future there will be more options)
 //
-// NOTE: The kind *cli* also checks `KIND_EXPERIMENTAL_PROVIDER` for "podman" or
-// "docker" currently and does not auto-detect / respects this if set.
+// NOTE: The kind *cli* also checks `KIND_EXPERIMENTAL_PROVIDER` for "podman",
+// "nerdctl" or "docker" currently and does not auto-detect / respects this if set.
 //
 // This will be replaced with some other mechanism in the future (likely when
 // podman support is GA), in the meantime though your tool may wish to match this.
@@ -115,6 +116,9 @@ func DetectNodeProvider() (ProviderOption, error) {
 	if docker.IsAvailable() {
 		return ProviderWithDocker(), nil
 	}
+	if nerdctl.IsAvailable() {
+		return ProviderWithNerdctl(""), nil
+	}
 	if podman.IsAvailable() {
 		return ProviderWithPodman(), nil
 	}
@@ -167,6 +171,13 @@ func ProviderWithPodman() ProviderOption {
 	})
 }
 
+// ProviderWithNerdctl configures the provider to use the nerdctl runtime
+func ProviderWithNerdctl(binaryName string) ProviderOption {
+	return providerRuntimeOption(func(p *Provider) {
+		p.provider = nerdctl.NewProvider(p.logger, binaryName)
+	})
+}
+
 // Create provisions and starts a kubernetes-in-docker cluster
 func (p *Provider) Create(name string, options ...CreateOption) error {
 	// apply options
diff --git a/vendor/sigs.k8s.io/kind/pkg/cmd/kind/version/version.go b/vendor/sigs.k8s.io/kind/pkg/cmd/kind/version/version.go
index d1e4b921b9..85f49fac2c 100644
--- a/vendor/sigs.k8s.io/kind/pkg/cmd/kind/version/version.go
+++ b/vendor/sigs.k8s.io/kind/pkg/cmd/kind/version/version.go
@@ -54,11 +54,11 @@ func DisplayVersion() string {
 }
 
 // versionCore is the core portion of the kind CLI version per Semantic Versioning 2.0.0
-const versionCore = "0.22.0"
+const versionCore = "0.23.0"
 
 // versionPreRelease is the base pre-release portion of the kind CLI version per
 // Semantic Versioning 2.0.0
-const versionPreRelease = ""
+var versionPreRelease = ""
 
 // gitCommitCount count the commits since the last release.
 // It is injected at build time.
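[Editor's note, not part of the vendored patch: the provider.go hunks above add nerdctl/finch as a selectable node provider. A minimal sketch of how a consumer of this module might opt into it, using only the public cluster package APIs referenced in that file (DetectNodeProvider, ProviderWithNerdctl, NewProvider, Create); the cluster name "demo" is illustrative:

	package main

	import "sigs.k8s.io/kind/pkg/cluster"

	func main() {
		// Prefer auto-detection (docker, then nerdctl, then podman, per DetectNodeProvider
		// above), falling back to nerdctl explicitly; the empty binaryName mirrors what
		// DetectNodeProvider itself passes to ProviderWithNerdctl.
		option, err := cluster.DetectNodeProvider()
		if err != nil || option == nil {
			option = cluster.ProviderWithNerdctl("")
		}
		provider := cluster.NewProvider(option)
		if err := provider.Create("demo"); err != nil {
			panic(err)
		}
	}
]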
diff --git a/vendor/sigs.k8s.io/kind/pkg/fs/fs.go b/vendor/sigs.k8s.io/kind/pkg/fs/fs.go
index 7fb4eae337..94764aa574 100644
--- a/vendor/sigs.k8s.io/kind/pkg/fs/fs.go
+++ b/vendor/sigs.k8s.io/kind/pkg/fs/fs.go
@@ -64,10 +64,10 @@ func Copy(src, dst string) error {
 		return err
 	}
 	// do real copy work
-	return copy(src, dst, info)
+	return copyWithSrcInfo(src, dst, info)
 }
 
-func copy(src, dst string, info os.FileInfo) error {
+func copyWithSrcInfo(src, dst string, info os.FileInfo) error {
 	if info.Mode()&os.ModeSymlink != 0 {
 		return copySymlink(src, dst)
 	}
@@ -128,7 +128,7 @@ func copySymlink(src, dst string) error {
 		return err
 	}
 	// copy the underlying contents
-	return copy(realSrc, dst, info)
+	return copyWithSrcInfo(realSrc, dst, info)
 }
 
 func copyDir(src, dst string, info os.FileInfo) error {
@@ -148,7 +148,7 @@ func copyDir(src, dst string, info os.FileInfo) error {
 		if err != nil {
 			return err
 		}
-		if err := copy(entrySrc, entryDst, fileInfo); err != nil {
+		if err := copyWithSrcInfo(entrySrc, entryDst, fileInfo); err != nil {
 			return err
 		}
 	}
diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/types.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/types.go
index fed3000798..f93c0e7b19 100644
--- a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/types.go
+++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/types.go
@@ -148,7 +148,7 @@ type Networking struct {
 	// If DisableDefaultCNI is true, kind will not install the default CNI setup.
 	// Instead the user should install their own CNI after creating the cluster.
 	DisableDefaultCNI bool
-	// KubeProxyMode defines if kube-proxy should operate in iptables or ipvs mode
+	// KubeProxyMode defines if kube-proxy should operate in iptables, ipvs or nftables mode
 	KubeProxyMode ProxyMode
 	// DNSSearch defines the DNS search domain to use for nodes. If not set, this will be inherited from the host.
 	DNSSearch *[]string
@@ -174,6 +174,8 @@ const (
 	IPTablesProxyMode ProxyMode = "iptables"
 	// IPVSProxyMode sets ProxyMode to ipvs
 	IPVSProxyMode ProxyMode = "ipvs"
+	// NFTablesProxyMode sets ProxyMode to nftables
+	NFTablesProxyMode ProxyMode = "nftables"
 	// NoneProxyMode disables kube-proxy
 	NoneProxyMode ProxyMode = "none"
 )
diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/validate.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/validate.go
index 2eabaac67e..884e5a473b 100644
--- a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/validate.go
+++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/validate.go
@@ -52,6 +52,11 @@ func (c *Cluster) Validate() error {
 		}
 	}
 
+	// ipFamily should be ipv4, ipv6, or dual
+	if c.Networking.IPFamily != IPv4Family && c.Networking.IPFamily != IPv6Family && c.Networking.IPFamily != DualStackFamily {
+		errs = append(errs, errors.Errorf("invalid ipFamily: %s", c.Networking.IPFamily))
+	}
+
 	// podSubnet should be a valid CIDR
 	if err := validateSubnets(c.Networking.PodSubnet, c.Networking.IPFamily); err != nil {
 		errs = append(errs, errors.Errorf("invalid pod subnet %v", err))
@@ -64,7 +69,7 @@ func (c *Cluster) Validate() error {
 	// KubeProxyMode should be iptables or ipvs
 	if c.Networking.KubeProxyMode != IPTablesProxyMode && c.Networking.KubeProxyMode != IPVSProxyMode &&
-		c.Networking.KubeProxyMode != NoneProxyMode {
+		c.Networking.KubeProxyMode != NoneProxyMode && c.Networking.KubeProxyMode != NFTablesProxyMode {
 		errs = append(errs, errors.Errorf("invalid kubeProxyMode: %s", c.Networking.KubeProxyMode))