diff --git a/Makefile b/Makefile index 74bf13cb8..41fa8809f 100644 --- a/Makefile +++ b/Makefile @@ -9,7 +9,7 @@ MODULE := $(shell head -1 go.mod | cut -d' ' -f2) ARTIFACTS := _out TEST_PKGS ?= ./... -TALOS_RELEASE ?= v1.5.0 +TALOS_RELEASE ?= v1.6.0-alpha.0 DEFAULT_K8S_VERSION ?= v1.27.2 TOOLS ?= ghcr.io/siderolabs/tools:v1.5.0 @@ -227,3 +227,11 @@ conformance: ## Performs policy checks against the commit and source code. .PHONY: clean clean: @rm -rf $(ARTIFACTS) + +.PHONY: docs-preview +docs-preview: ## Starts a local preview of the documentation using Hugo in docker + @docker run --rm --interactive --tty \ + --volume $(PWD):/src --workdir /src/website \ + --publish 1313:1313 \ + klakegg/hugo:0.95.0-ext-alpine \ + server diff --git a/app/sidero-controller-manager/config/manager/manager.yaml b/app/sidero-controller-manager/config/manager/manager.yaml index 389af613b..ba7934a44 100644 --- a/app/sidero-controller-manager/config/manager/manager.yaml +++ b/app/sidero-controller-manager/config/manager/manager.yaml @@ -1,5 +1,18 @@ apiVersion: v1 kind: Service +metadata: + name: dhcp + namespace: system +spec: + ports: + - port: 67 + targetPort: dhcp + protocol: UDP + selector: + control-plane: sidero-controller-manager +--- +apiVersion: v1 +kind: Service metadata: name: tftp namespace: system @@ -78,6 +91,9 @@ spec: imagePullPolicy: Always name: manager ports: + - name: dhcp + containerPort: 67 + protocol: UDP - name: tftp containerPort: 69 protocol: UDP diff --git a/app/sidero-controller-manager/internal/dhcp/dhcp_server.go b/app/sidero-controller-manager/internal/dhcp/dhcp_server.go new file mode 100644 index 000000000..4ecccd0b5 --- /dev/null +++ b/app/sidero-controller-manager/internal/dhcp/dhcp_server.go @@ -0,0 +1,211 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at http://mozilla.org/MPL/2.0/. + +package dhcp + +import ( + "errors" + "fmt" + "net" + "strconv" + + "github.com/go-logr/logr" + "github.com/insomniacslk/dhcp/dhcpv4" + "github.com/insomniacslk/dhcp/dhcpv4/server4" + "github.com/insomniacslk/dhcp/iana" + "github.com/siderolabs/gen/slices" +) + +// ServeDHCP starts the DHCP proxy server. 
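+// It answers only PXE boot DISCOVER requests and never allocates IP
+// addresses, so it can run alongside the existing DHCP server on the
+// network (standard proxyDHCP behavior).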
+func ServeDHCP(logger logr.Logger, apiEndpoint string, apiPort int) error { + server, err := server4.NewServer( + "", + nil, + handlePacket(logger, apiEndpoint, apiPort), + ) + if err != nil { + logger.Error(err, "error on DHCP4 proxy startup") + + return err + } + + return server.Serve() +} + +func handlePacket(logger logr.Logger, apiEndpoint string, apiPort int) func(conn net.PacketConn, peer net.Addr, m *dhcpv4.DHCPv4) { + return func(conn net.PacketConn, peer net.Addr, m *dhcpv4.DHCPv4) { + if err := isBootDHCP(m); err != nil { + logger.Info("ignoring packet", "source", m.ClientHWAddr, "reason", err) + + return + } + + fwtype, err := validateDHCP(m) + if err != nil { + logger.Info("invalid packet", "source", m.ClientHWAddr, "reason", err) + + return + } + + resp, err := offerDHCP(m, apiEndpoint, apiPort, fwtype) + if err != nil { + logger.Error(err, "failed to construct ProxyDHCP offer", "source", m.ClientHWAddr) + + return + } + + logger.Info("offering boot response", "source", m.ClientHWAddr, "server", resp.TFTPServerName(), "boot_filename", resp.BootFileNameOption()) + + _, err = conn.WriteTo(resp.ToBytes(), peer) + if err != nil { + logger.Error(err, "failure sending response", "source", m.ClientHWAddr) + } + } +} + +func isBootDHCP(pkt *dhcpv4.DHCPv4) error { + if pkt.MessageType() != dhcpv4.MessageTypeDiscover { + return fmt.Errorf("packet is %s, not %s", pkt.MessageType(), dhcpv4.MessageTypeDiscover) + } + + if pkt.Options[93] == nil { + return errors.New("not a PXE boot request (missing option 93)") + } + + return nil +} + +func validateDHCP(m *dhcpv4.DHCPv4) (fwtype Firmware, err error) { + arches := m.ClientArch() + + for _, arch := range arches { + switch arch { //nolint:exhaustive + case iana.INTEL_X86PC: + fwtype = FirmwareX86PC + case iana.EFI_IA32, iana.EFI_X86_64, iana.EFI_BC: + fwtype = FirmwareX86EFI + case iana.EFI_ARM64: + fwtype = FirmwareARMEFI + case iana.EFI_X86_HTTP, iana.EFI_X86_64_HTTP: + fwtype = FirmwareX86HTTP + case iana.EFI_ARM64_HTTP: + fwtype = FirmwareARMHTTP + } + } + + if fwtype == FirmwareUnsupported { + return 0, fmt.Errorf("unsupported client arch: %v", slices.Map(arches, func(a iana.Arch) string { return a.String() })) + } + + // Now, identify special sub-breeds of client firmware based on + // the user-class option. Note these only change the "firmware + // type", not the architecture we're reporting to Booters. We need + // to identify these as part of making the internal chainloading + // logic work properly. + if userClasses := m.UserClass(); len(userClasses) > 0 { + // If the client has had iPXE burned into its ROM (or is a VM + // that uses iPXE as the PXE "ROM"), special handling is + // needed because in this mode the client is using iPXE native + // drivers and chainloading to a UNDI stack won't work. + if userClasses[0] == "iPXE" && fwtype == FirmwareX86PC { + fwtype = FirmwareX86Ipxe + } + } + + guid := m.GetOneOption(dhcpv4.OptionClientMachineIdentifier) + switch len(guid) { + case 0: + // A missing GUID is invalid according to the spec, however + // there are PXE ROMs in the wild that omit the GUID and still + // expect to boot. The only thing we do with the GUID is + // mirror it back to the client if it's there, so we might as + // well accept these buggy ROMs. 
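+	//
+	// A well-formed option 97 value is one type byte (which must be zero)
+	// followed by the 16-byte machine GUID, 17 bytes in total (RFC 4578).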
+ case 17: + if guid[0] != 0 { + return 0, errors.New("malformed client GUID (option 97), leading byte must be zero") + } + default: + return 0, errors.New("malformed client GUID (option 97), wrong size") + } + + return fwtype, nil +} + +func offerDHCP(req *dhcpv4.DHCPv4, apiEndpoint string, apiPort int, fwtype Firmware) (*dhcpv4.DHCPv4, error) { + serverIPs, err := net.LookupIP(apiEndpoint) + if err != nil { + return nil, err + } + + if len(serverIPs) == 0 { + return nil, fmt.Errorf("no IPs found for %s", apiEndpoint) + } + + // pick up the first address + serverIP := serverIPs[0] + + modifiers := []dhcpv4.Modifier{ + dhcpv4.WithServerIP(serverIP), + dhcpv4.WithOptionCopied(req, dhcpv4.OptionClientMachineIdentifier), + dhcpv4.WithOptionCopied(req, dhcpv4.OptionClassIdentifier), + } + + resp, err := dhcpv4.NewReplyFromRequest(req, + modifiers..., + ) + if err != nil { + return nil, err + } + + if resp.GetOneOption(dhcpv4.OptionClassIdentifier) == nil { + resp.UpdateOption(dhcpv4.OptClassIdentifier("PXEClient")) + } + + switch fwtype { + case FirmwareX86PC: + // This is completely standard PXE: just load a file from TFTP. + resp.UpdateOption(dhcpv4.OptTFTPServerName(serverIP.String())) + resp.UpdateOption(dhcpv4.OptBootFileName("undionly.kpxe")) + case FirmwareX86Ipxe: + // Almost standard PXE, but the boot filename needs to be a URL. + resp.UpdateOption(dhcpv4.OptBootFileName(fmt.Sprintf("tftp://%s/undionly.kpxe", serverIP))) + case FirmwareX86EFI: + // This is completely standard PXE: just load a file from TFTP. + resp.UpdateOption(dhcpv4.OptTFTPServerName(serverIP.String())) + resp.UpdateOption(dhcpv4.OptBootFileName("snp.efi")) + case FirmwareARMEFI: + // This is completely standard PXE: just load a file from TFTP. + resp.UpdateOption(dhcpv4.OptTFTPServerName(serverIP.String())) + resp.UpdateOption(dhcpv4.OptBootFileName("snp-arm64.efi")) + case FirmwareX86HTTP: + // This is completely standard HTTP-boot: just load a file from HTTP. + resp.UpdateOption(dhcpv4.OptBootFileName(fmt.Sprintf("http://%s/tftp/snp.ipxe", net.JoinHostPort(serverIP.String(), strconv.Itoa(apiPort))))) + case FirmwareARMHTTP: + // This is completely standard HTTP-boot: just load a file from HTTP. + resp.UpdateOption(dhcpv4.OptBootFileName(fmt.Sprintf("http://%s/tftp/snp-arm64.ipxe", net.JoinHostPort(serverIP.String(), strconv.Itoa(apiPort))))) + case FirmwareUnsupported: + fallthrough + default: + return nil, fmt.Errorf("unsupported firmware type %d", fwtype) + } + + return resp, nil +} + +// Firmware describes a kind of firmware attempting to boot. +// +// This should only be used for selecting the right bootloader, +// kernel selection should key off the more generic architecture. +type Firmware int + +// The bootloaders that we know how to handle. +const ( + FirmwareUnsupported Firmware = iota // Unsupported + FirmwareX86PC // "Classic" x86 BIOS with PXE/UNDI support + FirmwareX86EFI // EFI x86 + FirmwareARMEFI // EFI ARM64 + FirmwareX86Ipxe // "Classic" x86 BIOS running iPXE (no UNDI support) + FirmwareX86HTTP // HTTP Boot X86 + FirmwareARMHTTP // ARM64 HTTP Boot +) diff --git a/sfyra/pkg/vm/vm.go b/app/sidero-controller-manager/internal/dhcp/dhcp_test.go similarity index 52% rename from sfyra/pkg/vm/vm.go rename to app/sidero-controller-manager/internal/dhcp/dhcp_test.go index 1b02d2777..8e151ed8a 100644 --- a/sfyra/pkg/vm/vm.go +++ b/app/sidero-controller-manager/internal/dhcp/dhcp_test.go @@ -2,5 +2,13 @@ // License, v. 2.0. 
If a copy of the MPL was not distributed with this
 // file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
-// Package vm provides tools to build a set of PXE-booted VMs for Sidero testing.
-package vm
+package dhcp_test
+
+import "testing"
+
+func TestEmpty(t *testing.T) {
+	// added for accurate coverage estimation
+	//
+	// please remove it once any unit test is added
+	// for this package
+}
diff --git a/app/sidero-controller-manager/main.go b/app/sidero-controller-manager/main.go
index 9ad570669..58c293073 100644
--- a/app/sidero-controller-manager/main.go
+++ b/app/sidero-controller-manager/main.go
@@ -33,6 +33,7 @@ import (
 	metalv1alpha1 "github.com/siderolabs/sidero/app/sidero-controller-manager/api/v1alpha1"
 	metalv1alpha2 "github.com/siderolabs/sidero/app/sidero-controller-manager/api/v1alpha2"
 	"github.com/siderolabs/sidero/app/sidero-controller-manager/controllers"
+	"github.com/siderolabs/sidero/app/sidero-controller-manager/internal/dhcp"
 	"github.com/siderolabs/sidero/app/sidero-controller-manager/internal/ipxe"
 	"github.com/siderolabs/sidero/app/sidero-controller-manager/internal/metadata"
 	"github.com/siderolabs/sidero/app/sidero-controller-manager/internal/power/api"
@@ -224,14 +225,22 @@ func main() {
 
 	errCh := make(chan error)
 
+	setupLog.Info("starting proxy DHCP server")
+
+	go func() {
+		if err := dhcp.ServeDHCP(ctrl.Log.WithName("dhcp-proxy"), apiEndpoint, apiPort); err != nil {
+			setupLog.Error(err, "unable to start proxy DHCP server", "controller", "Environment")
+			errCh <- err
+		}
+	}()
+
 	setupLog.Info("starting TFTP server")
 
 	go func() {
 		if err := tftp.ServeTFTP(); err != nil {
 			setupLog.Error(err, "unable to start TFTP server", "controller", "Environment")
+			errCh <- err
 		}
-
-		errCh <- err
 	}()
 
 	httpMux := http.NewServeMux()
@@ -282,12 +291,10 @@ func main() {
 	setupLog.Info("starting manager and HTTP server")
 
 	go func() {
-		err := mgr.Start(ctx)
-		if err != nil {
+		if err := mgr.Start(ctx); err != nil {
 			setupLog.Error(err, "problem running manager")
+			errCh <- err
 		}
-
-		errCh <- err
 	}()
 
 	go func() {
@@ -311,12 +318,10 @@ func main() {
 			httpMux.ServeHTTP(w, req)
 		})
 
-		err := http.ListenAndServe(fmt.Sprintf(":%d", httpPort), h2c.NewHandler(grpcHandler, h2s))
-		if err != nil {
+		if err := http.ListenAndServe(fmt.Sprintf(":%d", httpPort), h2c.NewHandler(grpcHandler, h2s)); err != nil {
 			setupLog.Error(err, "problem running HTTP server")
+			errCh <- err
 		}
-
-		errCh <- err
 	}()
 
 	for err = range errCh {
diff --git a/hack/release.toml b/hack/release.toml
index e6eefb135..c53df3ddd 100644
--- a/hack/release.toml
+++ b/hack/release.toml
@@ -45,4 +45,12 @@ Sidero should be able to process machine config for future versions of Talos.
     description = """\
 Sidero Agent now runs DHCP client in the userland, on the link which was used to PXE boot the machine.
 This allows to run Sidero Agent on the machine with several autoconfigured network interfaces, when one of them is used for the management network.
+"""
+
+    [notes.dhcpproxy]
+        title = "DHCP Proxy"
+        description = """\
+Sidero Controller Manager now includes a DHCP proxy which augments DHCP responses with additional PXE boot options.
+When enabled, the DHCP server in the environment handles only IP allocation and network configuration, while the DHCP proxy
+automatically provides PXE boot information based on the machine architecture and boot method.
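+The DHCP proxy listens on UDP port 67 in the sidero-controller-manager pod, alongside the existing TFTP service.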
""" diff --git a/sfyra/cmd/sfyra/cmd/bootstrap_capi.go b/sfyra/cmd/sfyra/cmd/bootstrap_capi.go index a16346377..f822a71af 100644 --- a/sfyra/cmd/sfyra/cmd/bootstrap_capi.go +++ b/sfyra/cmd/sfyra/cmd/bootstrap_capi.go @@ -33,9 +33,9 @@ var bootstrapCAPICmd = &cobra.Command{ RegistryMirrors: options.RegistryMirrors, - CPUs: options.BootstrapCPUs, - MemMB: options.BootstrapMemMB, - DiskGB: options.BootstrapDiskGB, + BootstrapCPUs: options.BootstrapCPUs, + BootstrapMemMB: options.BootstrapMemMB, + BootstrapDiskGB: options.BootstrapDiskGB, }) if err != nil { return err diff --git a/sfyra/cmd/sfyra/cmd/bootstrap_cluster.go b/sfyra/cmd/sfyra/cmd/bootstrap_cluster.go index ac3e4d04c..a0396b726 100644 --- a/sfyra/cmd/sfyra/cmd/bootstrap_cluster.go +++ b/sfyra/cmd/sfyra/cmd/bootstrap_cluster.go @@ -32,9 +32,9 @@ var bootstrapClusterCmd = &cobra.Command{ RegistryMirrors: options.RegistryMirrors, - CPUs: options.BootstrapCPUs, - MemMB: options.BootstrapMemMB, - DiskGB: options.BootstrapDiskGB, + BootstrapCPUs: options.BootstrapCPUs, + BootstrapMemMB: options.BootstrapMemMB, + BootstrapDiskGB: options.BootstrapDiskGB, }) if err != nil { return err diff --git a/sfyra/cmd/sfyra/cmd/bootstrap_servers.go b/sfyra/cmd/sfyra/cmd/bootstrap_servers.go deleted file mode 100644 index 65400332b..000000000 --- a/sfyra/cmd/sfyra/cmd/bootstrap_servers.go +++ /dev/null @@ -1,58 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at http://mozilla.org/MPL/2.0/. - -package cmd - -import ( - "context" - "net/netip" - - "github.com/siderolabs/talos/pkg/cli" - "github.com/spf13/cobra" - - "github.com/siderolabs/sidero/sfyra/pkg/vm" -) - -var bootSource string - -var bootstrapServersCmd = &cobra.Command{ - Use: "servers", - Short: "Create a set of VMs ready for PXE booting.", - Long: ``, - RunE: func(cmd *cobra.Command, args []string) error { - return cli.WithContext(context.Background(), func(ctx context.Context) error { - bootSourceIP := netip.MustParseAddr(bootSource) - - vmSet, err := vm.NewSet(ctx, vm.Options{ - Name: options.ManagementSetName, - Nodes: options.ManagementNodes, - BootSource: bootSourceIP, - CIDR: options.ManagementCIDR, - - TalosctlPath: options.TalosctlPath, - - CPUs: options.ManagementCPUs, - MemMB: options.ManagementMemMB, - DiskGB: options.ManagementDiskGB, - - DefaultBootOrder: options.DefaultBootOrder, - }) - if err != nil { - return err - } - - return vmSet.Setup(ctx) - }) - }, -} - -func init() { - bootstrapCmd.AddCommand(bootstrapServersCmd) - - bootstrapServersCmd.Flags().StringVar(&options.ManagementSetName, "management-set-name", options.ManagementSetName, "name for the management VM set") - bootstrapServersCmd.Flags().IntVar(&options.ManagementNodes, "management-nodes", options.ManagementNodes, "number of PXE nodes to create for the management rack") - bootstrapServersCmd.Flags().StringVar(&options.ManagementCIDR, "management-cidr", options.ManagementCIDR, "management cluster network CIDR") - bootstrapServersCmd.Flags().StringVar(&bootSource, "boot-source", "172.24.0.2", "the boot source IP for the iPXE boot") - bootstrapServersCmd.Flags().StringVar(&options.DefaultBootOrder, "default-boot-order", options.DefaultBootOrder, "QEMU default boot order") -} diff --git a/sfyra/cmd/sfyra/cmd/loadbalancer_create.go b/sfyra/cmd/sfyra/cmd/loadbalancer_create.go index cd6d1b513..a7c7a6544 100644 --- a/sfyra/cmd/sfyra/cmd/loadbalancer_create.go +++ 
b/sfyra/cmd/sfyra/cmd/loadbalancer_create.go @@ -61,7 +61,7 @@ var loadbalancerCreateCmd = &cobra.Command{ func init() { loadbalancerCmd.AddCommand(loadbalancerCreateCmd) - cidr := netip.MustParsePrefix(options.ManagementCIDR) + cidr := netip.MustParsePrefix(options.BootstrapCIDR) bridgeIP, err := talosnet.NthIPInNetwork(cidr, 1) if err != nil { diff --git a/sfyra/cmd/sfyra/cmd/options.go b/sfyra/cmd/sfyra/cmd/options.go index a3ec45210..a453c18b7 100644 --- a/sfyra/cmd/sfyra/cmd/options.go +++ b/sfyra/cmd/sfyra/cmd/options.go @@ -28,9 +28,7 @@ type Options struct { RegistryMirrors []string - ManagementCIDR string - ManagementSetName string - ManagementNodes int + ManagementNodes int BootstrapMemMB int64 BootstrapCPUs int64 @@ -72,9 +70,7 @@ func DefaultOptions() Options { InfrastructureProviders: []string{"sidero"}, ControlPlaneProviders: []string{"talos"}, - ManagementCIDR: "172.25.0.0/24", - ManagementSetName: "sfyra-management", - ManagementNodes: 4, + ManagementNodes: 4, BootstrapMemMB: 3072, BootstrapCPUs: 3, diff --git a/sfyra/cmd/sfyra/cmd/test_integration.go b/sfyra/cmd/sfyra/cmd/test_integration.go index 48bc7ac8a..eac6b2b0f 100644 --- a/sfyra/cmd/sfyra/cmd/test_integration.go +++ b/sfyra/cmd/sfyra/cmd/test_integration.go @@ -15,7 +15,6 @@ import ( "github.com/siderolabs/sidero/sfyra/pkg/bootstrap" "github.com/siderolabs/sidero/sfyra/pkg/capi" "github.com/siderolabs/sidero/sfyra/pkg/tests" - "github.com/siderolabs/sidero/sfyra/pkg/vm" ) var runTestPattern string @@ -39,46 +38,27 @@ var testIntegrationCmd = &cobra.Command{ RegistryMirrors: options.RegistryMirrors, - CPUs: options.BootstrapCPUs, - MemMB: options.BootstrapMemMB, - DiskGB: options.BootstrapDiskGB, - }) - if err != nil { - return err - } - - if !options.SkipTeardown { - defer bootstrapCluster.TearDown(ctx) //nolint:errcheck - } + BootstrapCPUs: options.BootstrapCPUs, + BootstrapMemMB: options.BootstrapMemMB, + BootstrapDiskGB: options.BootstrapDiskGB, - if err = bootstrapCluster.Setup(ctx); err != nil { - return err - } - - managementSet, err := vm.NewSet(ctx, vm.Options{ - Name: options.ManagementSetName, - Nodes: options.ManagementNodes, - BootSource: bootstrapCluster.SideroComponentsIP(), - CIDR: options.ManagementCIDR, + VMNodes: options.ManagementNodes, - CNIBundleURL: options.BootstrapCNIBundleURL, - TalosctlPath: options.TalosctlPath, - - CPUs: options.ManagementCPUs, - MemMB: options.ManagementMemMB, - DiskGB: options.ManagementDiskGB, + VMCPUs: options.ManagementCPUs, + VMMemMB: options.ManagementMemMB, + VMDiskGB: options.ManagementDiskGB, - DefaultBootOrder: options.DefaultBootOrder, + VMDefaultBootOrder: options.DefaultBootOrder, }) if err != nil { return err } if !options.SkipTeardown { - defer managementSet.TearDown(ctx) //nolint:errcheck + defer bootstrapCluster.TearDown(ctx) //nolint:errcheck } - if err = managementSet.Setup(ctx); err != nil { + if err = bootstrapCluster.Setup(ctx); err != nil { return err } @@ -103,7 +83,7 @@ var testIntegrationCmd = &cobra.Command{ // hacky hack os.Args = append(os.Args[0:1], "-test.v") - if ok := tests.Run(ctx, bootstrapCluster, managementSet, clusterAPI, tests.Options{ + if ok := tests.Run(ctx, bootstrapCluster, clusterAPI, tests.Options{ KernelURL: options.TalosKernelURL, InitrdURL: options.TalosInitrdURL, @@ -131,7 +111,6 @@ func init() { testIntegrationCmd.Flags().StringVar(&options.BootstrapTalosInitramfs, "bootstrap-initramfs", options.BootstrapTalosInitramfs, "Talos initramfs image for bootstrap cluster") 
testIntegrationCmd.Flags().StringVar(&options.BootstrapTalosInstaller, "bootstrap-installer", options.BootstrapTalosInstaller, "Talos install image for bootstrap cluster") testIntegrationCmd.Flags().StringVar(&options.BootstrapCIDR, "bootstrap-cidr", options.BootstrapCIDR, "bootstrap cluster network CIDR") - testIntegrationCmd.Flags().StringVar(&options.ManagementCIDR, "management-cidr", options.ManagementCIDR, "management cluster network CIDR") testIntegrationCmd.Flags().IntVar(&options.ManagementNodes, "management-nodes", options.ManagementNodes, "number of PXE nodes to create for the management rack") testIntegrationCmd.Flags().StringVar(&options.TalosctlPath, "talosctl-path", options.TalosctlPath, "path to the talosctl (for the QEMU provisioner)") testIntegrationCmd.Flags().StringSliceVar(&options.RegistryMirrors, "registry-mirror", options.RegistryMirrors, "registry mirrors to use") diff --git a/sfyra/go.mod b/sfyra/go.mod index 2d23a37d3..4f70c5bbf 100644 --- a/sfyra/go.mod +++ b/sfyra/go.mod @@ -29,8 +29,8 @@ require ( github.com/siderolabs/go-retry v0.3.2 github.com/siderolabs/net v0.4.0 github.com/siderolabs/sidero v0.0.0-00010101000000-000000000000 - github.com/siderolabs/talos v1.5.0 - github.com/siderolabs/talos/pkg/machinery v1.5.0 + github.com/siderolabs/talos v1.6.0-alpha.0 + github.com/siderolabs/talos/pkg/machinery v1.6.0-alpha.0 github.com/spf13/cobra v1.7.0 github.com/stretchr/testify v1.8.4 go.uber.org/zap v1.25.0 @@ -95,7 +95,7 @@ require ( github.com/google/go-github/v48 v48.2.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.3.1 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -177,7 +177,7 @@ require ( gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/sfyra/go.sum b/sfyra/go.sum index 03061688e..664fb07f5 100644 --- a/sfyra/go.sum +++ b/sfyra/go.sum @@ -68,6 +68,7 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -81,6 +82,7 @@ github.com/blang/semver 
v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/brianvoe/gofakeit/v6 v6.17.0 h1:obbQTJeHfktJtiZzq0Q1bEpsNUs+yHrYlPVWt7BtmJ4= +github.com/brianvoe/gofakeit/v6 v6.17.0/go.mod h1:Ow6qC71xtwm79anlwKRlWZW6zVq9D2XHE4QSSMP/rU8= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -91,6 +93,7 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.11.0 h1:V8gS/bTCCjX9uUnkUFUpPsksM8n1lXBAvHcpiFk1X2Y= +github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= @@ -155,6 +158,7 @@ github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBD github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= @@ -173,6 +177,7 @@ github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= +github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= @@ -182,6 +187,7 @@ github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= 
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -190,6 +196,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -261,11 +268,12 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk= +github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= @@ -351,6 +359,7 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -378,6 +387,7 @@ github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJ github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= github.com/mdlayher/packet v1.1.2 h1:3Up1NG6LZrsgDVn6X4L9Ge/iyRyxFEFD9o6Pr3Q1nQY= +github.com/mdlayher/packet v1.1.2/go.mod h1:GEu1+n9sG5VtiRE4SydOmX5GTwyyYlteZiFU+x0kew4= github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= @@ -418,6 +428,7 @@ github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= +github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= @@ -465,6 +476,7 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -505,10 +517,10 @@ github.com/siderolabs/net v0.4.0 h1:1bOgVay/ijPkJz4qct98nHsiB/ysLQU0KLoBC4qLm7I= github.com/siderolabs/net v0.4.0/go.mod h1:/ibG+Hm9HU27agp5r9Q3eZicEfjquzNzQNux5uEk0kM= github.com/siderolabs/protoenc v0.2.0 h1:QFxWIAo//12+/bm27GNYoK/TpQGTYsRrrZCu9jSghvU= github.com/siderolabs/protoenc v0.2.0/go.mod h1:mu4gc6pJxhdJYpuloacKE4jsJojj87qDXwn8LUvs2bY= -github.com/siderolabs/talos v1.5.0 h1:9Z6XjykjX1dhWG4k3RoxID1mmdBKcT12zkBxJyH+Or0= -github.com/siderolabs/talos v1.5.0/go.mod h1:d4KCrm7xK93Zp7nShFXl8kYDY2C0vn5fE8tfe9ngeHw= -github.com/siderolabs/talos/pkg/machinery v1.5.0 h1:yWRYcMKSkdDhumYnSnv/HndJ/S7AYl2/JcwkHWwB1So= -github.com/siderolabs/talos/pkg/machinery v1.5.0/go.mod h1:7Mmswfab95ULNclTI4ZGR8hZaQyrjDVfSyYGVECgFBs= +github.com/siderolabs/talos v1.6.0-alpha.0 h1:+yF92ME/mul/npANEmEfZ/w5BqWQUSjMM7lvAGfECYY= +github.com/siderolabs/talos 
v1.6.0-alpha.0/go.mod h1:rKeO9Cs0ooh4cnDJjHGl/CaqDJYNhuJAIl0Gg+soQuw= +github.com/siderolabs/talos/pkg/machinery v1.6.0-alpha.0 h1:YGqLGzCFsKeJwqzajumlqLgIxQtUuExWWqYgBHJ1fEQ= +github.com/siderolabs/talos/pkg/machinery v1.6.0-alpha.0/go.mod h1:z+F1hVjcrk5WXbZRu7ODij8MwwyyGJJIlsfpbY8g2bs= github.com/siderolabs/tcpproxy v0.1.0 h1:IbkS9vRhjMOscc1US3M5P1RnsGKFgB6U5IzUk+4WkKA= github.com/siderolabs/tcpproxy v0.1.0/go.mod h1:onn6CPPj/w1UNqQ0U97oRPF0CqbrgEApYCw4P9IiCW8= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -587,6 +599,7 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= @@ -619,6 +632,7 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 h1:Jvc7gsqn21cJHCmAWx0LiimpP18LZmUxkT5Mp7EZ1mI= +golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -861,6 +875,7 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8= +golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -932,11 +947,12 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 h1:Au6te5hbKUV8pIYWHqOUZ1pva5qK/rwbIhoXEUB9Lu8= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= 
+google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e h1:S83+ibolgyZ0bqz7KEsUOPErxcv4VzlszxY+31OfB/E= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= diff --git a/sfyra/pkg/bootstrap/cluster.go b/sfyra/pkg/bootstrap/cluster.go index d1fa0927b..4a94d394f 100644 --- a/sfyra/pkg/bootstrap/cluster.go +++ b/sfyra/pkg/bootstrap/cluster.go @@ -58,9 +58,17 @@ type Options struct { RegistryMirrors []string - MemMB int64 - CPUs int64 - DiskGB int64 + BootstrapMemMB int64 + BootstrapCPUs int64 + BootstrapDiskGB int64 + + VMNodes int + + VMMemMB int64 + VMCPUs int64 + VMDiskGB int64 + + VMDefaultBootOrder string } // NewCluster creates new bootstrap Talos cluster. @@ -228,11 +236,11 @@ func (cluster *Cluster) create(ctx context.Context) error { Name: constants.BootstrapControlPlane, Type: machine.TypeControlPlane, IPs: []netip.Addr{cluster.controlplaneIP}, - Memory: cluster.options.MemMB * 1024 * 1024, - NanoCPUs: cluster.options.CPUs * 1000 * 1000 * 1000, + Memory: cluster.options.BootstrapMemMB * 1024 * 1024, + NanoCPUs: cluster.options.BootstrapCPUs * 1000 * 1000 * 1000, Disks: []*provision.Disk{ { - Size: uint64(cluster.options.DiskGB) * 1024 * 1024 * 1024, + Size: uint64(cluster.options.BootstrapDiskGB) * 1024 * 1024 * 1024, }, }, Config: configBundle.ControlPlane(), @@ -241,17 +249,47 @@ func (cluster *Cluster) create(ctx context.Context) error { Name: constants.BootstrapWorker, Type: machine.TypeWorker, IPs: []netip.Addr{cluster.workerIP}, - Memory: cluster.options.MemMB * 1024 * 1024, - NanoCPUs: cluster.options.CPUs * 1000 * 1000 * 1000, + Memory: cluster.options.BootstrapMemMB * 1024 * 1024, + NanoCPUs: cluster.options.BootstrapCPUs * 1000 * 1000 * 1000, Disks: []*provision.Disk{ { - Size: uint64(cluster.options.DiskGB) * 1024 * 1024 * 1024, + Size: uint64(cluster.options.BootstrapDiskGB) * 1024 * 1024 * 1024, }, }, Config: configBundle.Worker(), }, ) + vmIPs := make([]netip.Addr, cluster.options.VMNodes) + + for i := range vmIPs { + vmIPs[i], err = talosnet.NthIPInNetwork(cidr, i+4) + if err != nil { + return err + } + } + + for i := 0; i < cluster.options.VMNodes; i++ { + request.Nodes = append(request.Nodes, + provision.NodeRequest{ + Name: fmt.Sprintf("pxe-%d", i), + Type: machine.TypeUnknown, + IPs: []netip.Addr{vmIPs[i]}, + Memory: cluster.options.VMMemMB * 1024 * 1024, + NanoCPUs: cluster.options.VMCPUs * 1000 * 1000 * 1000, + Disks: []*provision.Disk{ + { + Size: uint64(cluster.options.VMDiskGB) * 1024 * 1024 * 1024, + }, + }, + PXEBooted: true, + // TFTPServer: set.options.BootSource.String(), + // 
IPXEBootFilename: "undionly.kpxe",
+				SkipInjectingConfig: true,
+				DefaultBootOrder:    cluster.options.VMDefaultBootOrder,
+			})
+	}
+
 	cluster.cluster, err = cluster.provisioner.Create(ctx, request,
 		provision.WithBootlader(true),
 		// TODO: UEFI doesn't work correctly on PXE timeout, as it drops to UEFI shell
@@ -314,3 +352,8 @@ func (cluster *Cluster) BridgeIP() netip.Addr {
 func (cluster *Cluster) Name() string {
 	return cluster.cluster.Info().ClusterName
 }
+
+// Nodes returns information about PXE VMs.
+func (cluster *Cluster) Nodes() []provision.NodeInfo {
+	return cluster.cluster.Info().ExtraNodes
+}
diff --git a/sfyra/pkg/talos/cluster.go b/sfyra/pkg/talos/cluster.go
index b98a6bfc3..25d21b427 100644
--- a/sfyra/pkg/talos/cluster.go
+++ b/sfyra/pkg/talos/cluster.go
@@ -9,6 +9,7 @@ import (
 	"net/netip"
 
 	"github.com/siderolabs/talos/pkg/cluster"
+	"github.com/siderolabs/talos/pkg/provision"
 )
 
 // Cluster is an abstract interface for the Talos cluster.
@@ -23,4 +24,6 @@ type Cluster interface {
 	SideroComponentsIP() netip.Addr
 	// K8s client source.
 	KubernetesClient() cluster.K8sProvider
+	// Nodes returns a list of PXE VMs.
+	Nodes() []provision.NodeInfo
 }
diff --git a/sfyra/pkg/tests/cluster_utils.go b/sfyra/pkg/tests/cluster_utils.go
index 88e5b3231..217d72c76 100644
--- a/sfyra/pkg/tests/cluster_utils.go
+++ b/sfyra/pkg/tests/cluster_utils.go
@@ -28,11 +28,10 @@ import (
 	"github.com/siderolabs/sidero/sfyra/pkg/capi"
 	"github.com/siderolabs/sidero/sfyra/pkg/loadbalancer"
 	"github.com/siderolabs/sidero/sfyra/pkg/talos"
-	"github.com/siderolabs/sidero/sfyra/pkg/vm"
 )
 
 // createCluster without waiting for it to become ready.
-func createCluster(ctx context.Context, t *testing.T, metalClient client.Client, capiCluster talos.Cluster, vmSet *vm.Set,
+func createCluster(ctx context.Context, t *testing.T, metalClient client.Client, capiCluster talos.Cluster,
 	capiManager *capi.Manager, clusterName, serverClassName string, loadbalancerPort int, controlPlaneNodes, workerNodes int64, talosVersion, kubernetesVersion string,
 ) *loadbalancer.ControlPlane {
 	t.Logf("deploying cluster %q from server class %q with loadbalancer port %d", clusterName, serverClassName, loadbalancerPort)
@@ -46,10 +45,10 @@ func createCluster(ctx context.Context, t *testing.T, metalClient client.Client,
 	capiClient := capiManager.GetManagerClient()
 
 	//nolint:contextcheck
-	loadbalancer, err := loadbalancer.NewControlPlane(metalClient, vmSet.BridgeIP(), loadbalancerPort, "default", clusterName, false)
+	loadbalancer, err := loadbalancer.NewControlPlane(metalClient, capiCluster.BridgeIP(), loadbalancerPort, "default", clusterName, false)
 	require.NoError(t, err)
 
-	t.Setenv("CONTROL_PLANE_ENDPOINT", vmSet.BridgeIP().String())
+	t.Setenv("CONTROL_PLANE_ENDPOINT", capiCluster.BridgeIP().String())
 	t.Setenv("CONTROL_PLANE_PORT", strconv.Itoa(loadbalancerPort))
 	t.Setenv("CONTROL_PLANE_SERVERCLASS", serverClassName)
 	t.Setenv("WORKER_SERVERCLASS", serverClassName)
@@ -116,7 +115,7 @@ func createCluster(ctx context.Context, t *testing.T, metalClient client.Client,
 }
 
 // waitForClusterReady waits for cluster to become ready.
-func waitForClusterReady(ctx context.Context, t *testing.T, metalClient client.Client, vmSet *vm.Set, clusterName string) *capi.Cluster { +func waitForClusterReady(ctx context.Context, t *testing.T, metalClient client.Client, capiCluster talos.Cluster, clusterName string) *capi.Cluster { t.Log("waiting for the cluster to be provisioned") require.NoError(t, retry.Constant(10*time.Minute, retry.WithUnits(10*time.Second), retry.WithErrorLogging(true)).Retry(func() error { @@ -125,7 +124,7 @@ func waitForClusterReady(ctx context.Context, t *testing.T, metalClient client.C t.Log("verifying cluster health") - deployedCluster, err := capi.NewCluster(ctx, metalClient, clusterName, vmSet.BridgeIP()) + deployedCluster, err := capi.NewCluster(ctx, metalClient, clusterName, capiCluster.BridgeIP()) require.NoError(t, err) require.NoError(t, deployedCluster.Health(ctx)) @@ -133,12 +132,12 @@ func waitForClusterReady(ctx context.Context, t *testing.T, metalClient client.C return deployedCluster } -func deployCluster(ctx context.Context, t *testing.T, metalClient client.Client, capiCluster talos.Cluster, vmSet *vm.Set, +func deployCluster(ctx context.Context, t *testing.T, metalClient client.Client, capiCluster talos.Cluster, capiManager *capi.Manager, clusterName, serverClassName string, loadbalancerPort int, controlPlaneNodes, workerNodes int64, talosVersion, kubernetesVersion string, ) *loadbalancer.ControlPlane { - loadbalancer := createCluster(ctx, t, metalClient, capiCluster, vmSet, capiManager, clusterName, serverClassName, loadbalancerPort, controlPlaneNodes, workerNodes, talosVersion, kubernetesVersion) + loadbalancer := createCluster(ctx, t, metalClient, capiCluster, capiManager, clusterName, serverClassName, loadbalancerPort, controlPlaneNodes, workerNodes, talosVersion, kubernetesVersion) - waitForClusterReady(ctx, t, metalClient, vmSet, clusterName) + waitForClusterReady(ctx, t, metalClient, capiCluster, clusterName) return loadbalancer } diff --git a/sfyra/pkg/tests/management_cluster.go b/sfyra/pkg/tests/management_cluster.go index 0e5ceef5a..88ee1fe12 100644 --- a/sfyra/pkg/tests/management_cluster.go +++ b/sfyra/pkg/tests/management_cluster.go @@ -12,7 +12,6 @@ import ( "github.com/siderolabs/sidero/sfyra/pkg/capi" "github.com/siderolabs/sidero/sfyra/pkg/talos" - "github.com/siderolabs/sidero/sfyra/pkg/vm" ) const ( @@ -21,8 +20,8 @@ const ( ) // TestManagementCluster deploys the management cluster via CAPI. 
-func TestManagementCluster(ctx context.Context, metalClient client.Client, cluster talos.Cluster, vmSet *vm.Set, capiManager *capi.Manager, talosRelease, kubernetesVersion string) TestFunc { +func TestManagementCluster(ctx context.Context, metalClient client.Client, cluster talos.Cluster, capiManager *capi.Manager, talosRelease, kubernetesVersion string) TestFunc { return func(t *testing.T) { - deployCluster(ctx, t, metalClient, cluster, vmSet, capiManager, managementClusterName, serverClassName, managementClusterLBPort, 1, 1, talosRelease, kubernetesVersion) + deployCluster(ctx, t, metalClient, cluster, capiManager, managementClusterName, serverClassName, managementClusterLBPort, 1, 1, talosRelease, kubernetesVersion) } } diff --git a/sfyra/pkg/tests/scale.go b/sfyra/pkg/tests/scale.go index e17cd8bcb..a04b76822 100644 --- a/sfyra/pkg/tests/scale.go +++ b/sfyra/pkg/tests/scale.go @@ -19,51 +19,51 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/siderolabs/sidero/sfyra/pkg/capi" - "github.com/siderolabs/sidero/sfyra/pkg/vm" + "github.com/siderolabs/sidero/sfyra/pkg/talos" ) type ScaleCallBack func(runtime.Object) error // TestScaleControlPlaneUp verifies that the control plane can scale up. -func TestScaleControlPlaneUp(ctx context.Context, metalClient client.Client, vmSet *vm.Set) TestFunc { +func TestScaleControlPlaneUp(ctx context.Context, metalClient client.Client, capiCluster talos.Cluster) TestFunc { return func(t *testing.T) { err := scaleControlPlane(ctx, metalClient, 3) require.NoError(t, err) - err = verifyClusterHealth(ctx, metalClient, vmSet, t) + err = verifyClusterHealth(ctx, metalClient, capiCluster, t) require.NoError(t, err) } } // TestScaleControlPlaneDown verifies that the control plane can scale down. -func TestScaleControlPlaneDown(ctx context.Context, metalClient client.Client, vmSet *vm.Set) TestFunc { +func TestScaleControlPlaneDown(ctx context.Context, metalClient client.Client, capiCluster talos.Cluster) TestFunc { return func(t *testing.T) { err := scaleControlPlane(ctx, metalClient, 1) require.NoError(t, err) - err = verifyClusterHealth(ctx, metalClient, vmSet, t) + err = verifyClusterHealth(ctx, metalClient, capiCluster, t) require.NoError(t, err) } } // TestScaleWorkersUp verifies that the workers can scale up. -func TestScaleWorkersUp(ctx context.Context, metalClient client.Client, vmSet *vm.Set) TestFunc { +func TestScaleWorkersUp(ctx context.Context, metalClient client.Client, capiCluster talos.Cluster) TestFunc { return func(t *testing.T) { err := scaleWorkers(ctx, metalClient, 3) require.NoError(t, err) - err = verifyClusterHealth(ctx, metalClient, vmSet, t) + err = verifyClusterHealth(ctx, metalClient, capiCluster, t) require.NoError(t, err) } } // TestScaleWorkersDown verifies that the workers can scale down. 
-func TestScaleWorkersDown(ctx context.Context, metalClient client.Client, vmSet *vm.Set) TestFunc { +func TestScaleWorkersDown(ctx context.Context, metalClient client.Client, capiCluster talos.Cluster) TestFunc { return func(t *testing.T) { err := scaleWorkers(ctx, metalClient, 1) require.NoError(t, err) - err = verifyClusterHealth(ctx, metalClient, vmSet, t) + err = verifyClusterHealth(ctx, metalClient, capiCluster, t) require.NoError(t, err) } } @@ -177,10 +177,10 @@ func scale(ctx context.Context, metalClient client.Client, name string, obj clie return nil } -func verifyClusterHealth(ctx context.Context, metalClient client.Reader, vmSet *vm.Set, t *testing.T) error { +func verifyClusterHealth(ctx context.Context, metalClient client.Reader, capiCluster talos.Cluster, t *testing.T) error { t.Log("verifying cluster health") - cluster, err := capi.NewCluster(ctx, metalClient, managementClusterName, vmSet.BridgeIP()) + cluster, err := capi.NewCluster(ctx, metalClient, managementClusterName, capiCluster.BridgeIP()) if err != nil { return err } diff --git a/sfyra/pkg/tests/server.go b/sfyra/pkg/tests/server.go index 431f873fb..a2e35c5ac 100644 --- a/sfyra/pkg/tests/server.go +++ b/sfyra/pkg/tests/server.go @@ -37,13 +37,12 @@ import ( "github.com/siderolabs/sidero/sfyra/pkg/capi" "github.com/siderolabs/sidero/sfyra/pkg/constants" "github.com/siderolabs/sidero/sfyra/pkg/talos" - "github.com/siderolabs/sidero/sfyra/pkg/vm" ) // TestServerRegistration verifies that all the servers got registered. -func TestServerRegistration(ctx context.Context, metalClient client.Client, vmSet *vm.Set) TestFunc { +func TestServerRegistration(ctx context.Context, metalClient client.Client, capiCluster talos.Cluster) TestFunc { return func(t *testing.T) { - numNodes := len(vmSet.Nodes()) + numNodes := len(capiCluster.Nodes()) var servers *metalv1.ServerList @@ -64,7 +63,7 @@ func TestServerRegistration(ctx context.Context, metalClient client.Client, vmSe assert.Len(t, servers.Items, numNodes) - nodes := vmSet.Nodes() + nodes := capiCluster.Nodes() expectedUUIDs := make([]string, len(nodes)) for i := range nodes { @@ -99,11 +98,11 @@ func configPatchToJSON(t *testing.T, o interface{}) []byte { } // TestServerMgmtAPI patches all the servers for the management API. -func TestServerMgmtAPI(ctx context.Context, metalClient client.Client, vmSet *vm.Set) TestFunc { +func TestServerMgmtAPI(ctx context.Context, metalClient client.Client, capiCluster talos.Cluster) TestFunc { return func(t *testing.T) { - bridgeIP := vmSet.BridgeIP() + bridgeIP := capiCluster.BridgeIP() - for _, vm := range vmSet.Nodes() { + for _, vm := range capiCluster.Nodes() { server := metalv1.Server{} require.NoError(t, metalClient.Get(ctx, types.NamespacedName{Name: vm.UUID.String()}, &server)) @@ -218,7 +217,7 @@ func TestServerPatch(ctx context.Context, metalClient client.Client, registryMir } // TestServerAcceptance makes sure the accepted bool works. -func TestServerAcceptance(ctx context.Context, metalClient client.Client, vmSet *vm.Set) TestFunc { +func TestServerAcceptance(ctx context.Context, metalClient client.Client) TestFunc { return func(t *testing.T) { const numDummies = 3 @@ -304,7 +303,7 @@ func TestServerAcceptance(ctx context.Context, metalClient client.Client, vmSet } // TestServerCordoned makes sure the cordoned bool works. 
-func TestServerCordoned(ctx context.Context, metalClient client.Client, vmSet *vm.Set) TestFunc { +func TestServerCordoned(ctx context.Context, metalClient client.Client) TestFunc { return func(t *testing.T) { const numDummies = 3 @@ -533,7 +532,7 @@ const ( ) // TestServerPXEBoot verifies that PXE boot is retried when the server gets incorrect configuration. -func TestServerPXEBoot(ctx context.Context, metalClient client.Client, cluster talos.Cluster, vmSet *vm.Set, capiManager *capi.Manager, talosRelease, kubernetesVersion string) TestFunc { +func TestServerPXEBoot(ctx context.Context, metalClient client.Client, cluster talos.Cluster, capiManager *capi.Manager, talosRelease, kubernetesVersion string) TestFunc { return func(t *testing.T) { pxeTestServerClass := "pxe-test-server" @@ -568,7 +567,7 @@ func TestServerPXEBoot(ctx context.Context, metalClient client.Client, cluster t t.Cleanup(func() { assert.NoError(t, metalClient.Delete(ctx, &serverClass)) }) - loadbalancer := createCluster(ctx, t, metalClient, cluster, vmSet, capiManager, pxeTestClusterName, pxeTestServerClass, pxeTestClusterLBPort, 1, 0, talosRelease, kubernetesVersion) + loadbalancer := createCluster(ctx, t, metalClient, cluster, capiManager, pxeTestClusterName, pxeTestServerClass, pxeTestClusterLBPort, 1, 0, talosRelease, kubernetesVersion) t.Log("waiting for the machine to report config validation error") @@ -612,7 +611,7 @@ func TestServerPXEBoot(ctx context.Context, metalClient client.Client, cluster t err = patchHelper.Patch(ctx, &serverClass) require.NoError(t, err) - waitForClusterReady(ctx, t, metalClient, vmSet, pxeTestClusterName) + waitForClusterReady(ctx, t, metalClient, cluster, pxeTestClusterName) deleteCluster(ctx, t, metalClient, pxeTestClusterName) loadbalancer.Close() //nolint:errcheck diff --git a/sfyra/pkg/tests/server_class.go b/sfyra/pkg/tests/server_class.go index 0784b0942..694a79fb1 100644 --- a/sfyra/pkg/tests/server_class.go +++ b/sfyra/pkg/tests/server_class.go @@ -36,7 +36,6 @@ import ( "github.com/siderolabs/sidero/sfyra/pkg/capi" "github.com/siderolabs/sidero/sfyra/pkg/constants" "github.com/siderolabs/sidero/sfyra/pkg/talos" - "github.com/siderolabs/sidero/sfyra/pkg/vm" ) const ( @@ -45,9 +44,9 @@ const ( ) // TestServerClassAny verifies server class "any". -func TestServerClassAny(ctx context.Context, metalClient client.Client, vmSet *vm.Set) TestFunc { +func TestServerClassAny(ctx context.Context, metalClient client.Client, capiCluster talos.Cluster) TestFunc { return func(t *testing.T) { - numNodes := len(vmSet.Nodes()) + numNodes := len(capiCluster.Nodes()) var serverClass metalv1.ServerClass err := metalClient.Get(ctx, types.NamespacedName{Name: metalv1.ServerClassAny}, &serverClass) @@ -81,7 +80,7 @@ func TestServerClassAny(ctx context.Context, metalClient client.Client, vmSet *v } // TestServerClassCreate verifies server class creation. 
-func TestServerClassCreate(ctx context.Context, metalClient client.Client, vmSet *vm.Set) TestFunc {
+func TestServerClassCreate(ctx context.Context, metalClient client.Client, capiCluster talos.Cluster) TestFunc {
 	return func(t *testing.T) {
 		classSpec := metalv1.ServerClassSpec{
 			Qualifiers: metalv1.Qualifiers{
@@ -105,7 +104,7 @@ func TestServerClassCreate(ctx context.Context, metalClient client.Client, vmSet
 		serverClass, err := createServerClass(ctx, metalClient, defaultServerClassName, classSpec)
 		require.NoError(t, err)
 
-		numNodes := len(vmSet.Nodes())
+		numNodes := len(capiCluster.Nodes())
 
 		// wait for the server class to gather all nodes (all nodes should match)
 		require.NoError(t, retry.Constant(2*time.Minute, retry.WithUnits(10*time.Second)).Retry(func() error {
@@ -122,7 +121,7 @@ func TestServerClassCreate(ctx context.Context, metalClient client.Client, vmSet
 		assert.Len(t, append(serverClass.Status.ServersAvailable, serverClass.Status.ServersInUse...), numNodes)
 
-		nodes := vmSet.Nodes()
+		nodes := capiCluster.Nodes()
 
 		expectedUUIDs := make([]string, len(nodes))
 
 		for i := range nodes {
diff --git a/sfyra/pkg/tests/tests.go b/sfyra/pkg/tests/tests.go
index 6d344e91e..dca1268af 100644
--- a/sfyra/pkg/tests/tests.go
+++ b/sfyra/pkg/tests/tests.go
@@ -13,7 +13,6 @@ import (
 
 	"github.com/siderolabs/sidero/sfyra/pkg/capi"
 	"github.com/siderolabs/sidero/sfyra/pkg/talos"
-	"github.com/siderolabs/sidero/sfyra/pkg/vm"
 )
 
 // TestFunc is a testing function prototype.
@@ -32,7 +31,7 @@ type Options struct {
 }
 
 // Run all the tests.
-func Run(ctx context.Context, cluster talos.Cluster, vmSet *vm.Set, capiManager *capi.Manager, options Options) (ok bool) {
+func Run(ctx context.Context, cluster talos.Cluster, capiManager *capi.Manager, options Options) (ok bool) {
 	metalClient, err := capiManager.GetMetalClient(ctx)
 	if err != nil {
 		log.Printf("error creating metalClient: %s", err)
@@ -43,11 +42,11 @@ func Run(ctx context.Context, cluster talos.Cluster, vmSet *vm.Set, capiManager
 	testList := []testing.InternalTest{
 		{
 			"TestServerRegistration",
-			TestServerRegistration(ctx, metalClient, vmSet),
+			TestServerRegistration(ctx, metalClient, cluster),
 		},
 		{
 			"TestServerMgmtAPI",
-			TestServerMgmtAPI(ctx, metalClient, vmSet),
+			TestServerMgmtAPI(ctx, metalClient, cluster),
 		},
 		{
 			"TestServerPatch",
@@ -59,11 +58,11 @@ func Run(ctx context.Context, cluster talos.Cluster, vmSet *vm.Set, capiManager
 		},
 		{
 			"TestServerAcceptance",
-			TestServerAcceptance(ctx, metalClient, vmSet),
+			TestServerAcceptance(ctx, metalClient),
 		},
 		{
 			"TestServerCordoned",
-			TestServerCordoned(ctx, metalClient, vmSet),
+			TestServerCordoned(ctx, metalClient),
 		},
 		{
 			"TestServerResetOnAcceptance",
@@ -87,11 +86,11 @@ func Run(ctx context.Context, cluster talos.Cluster, vmSet *vm.Set, capiManager
 		},
 		{
 			"TestServerClassAny",
-			TestServerClassAny(ctx, metalClient, vmSet),
+			TestServerClassAny(ctx, metalClient, cluster),
 		},
 		{
 			"TestServerClassCreate",
-			TestServerClassCreate(ctx, metalClient, vmSet),
+			TestServerClassCreate(ctx, metalClient, cluster),
 		},
 		{
 			"TestServerClassPatch",
@@ -99,11 +98,11 @@ func Run(ctx context.Context, cluster talos.Cluster, vmSet *vm.Set, capiManager
 		},
 		{
 			"TestServerPXEBoot",
-			TestServerPXEBoot(ctx, metalClient, cluster, vmSet, capiManager, options.TalosRelease, options.KubernetesVersion),
+			TestServerPXEBoot(ctx, metalClient, cluster, capiManager, options.TalosRelease, options.KubernetesVersion),
 		},
 		{
 			"TestManagementCluster",
-			TestManagementCluster(ctx, metalClient, cluster, vmSet, capiManager, options.TalosRelease, options.KubernetesVersion),
+			TestManagementCluster(ctx, metalClient, cluster, capiManager, options.TalosRelease, options.KubernetesVersion),
 		},
 		{
 			"TestMatchServersMetalMachines",
@@ -111,19 +110,19 @@ func Run(ctx context.Context, cluster talos.Cluster, vmSet *vm.Set, capiManager
 		},
 		{
 			"TestScaleWorkersUp",
-			TestScaleWorkersUp(ctx, metalClient, vmSet),
+			TestScaleWorkersUp(ctx, metalClient, cluster),
 		},
 		{
 			"TestScaleWorkersDown",
-			TestScaleWorkersDown(ctx, metalClient, vmSet),
+			TestScaleWorkersDown(ctx, metalClient, cluster),
 		},
 		{
 			"TestScaleControlPlaneUp",
-			TestScaleControlPlaneUp(ctx, metalClient, vmSet),
+			TestScaleControlPlaneUp(ctx, metalClient, cluster),
 		},
 		{
 			"TestScaleControlPlaneDown",
-			TestScaleControlPlaneDown(ctx, metalClient, vmSet),
+			TestScaleControlPlaneDown(ctx, metalClient, cluster),
 		},
 		{
 			"TestMachineDeploymentReconcile",
@@ -139,7 +138,7 @@ func Run(ctx context.Context, cluster talos.Cluster, vmSet *vm.Set, capiManager
 		},
 		{
 			"TestWorkloadCluster",
-			TestWorkloadCluster(ctx, metalClient, cluster, vmSet, capiManager, options.TalosRelease, options.KubernetesVersion),
+			TestWorkloadCluster(ctx, metalClient, cluster, capiManager, options.TalosRelease, options.KubernetesVersion),
 		},
 	}
 
diff --git a/sfyra/pkg/tests/workload_cluster.go b/sfyra/pkg/tests/workload_cluster.go
index f3a496fb9..f09034c49 100644
--- a/sfyra/pkg/tests/workload_cluster.go
+++ b/sfyra/pkg/tests/workload_cluster.go
@@ -12,7 +12,6 @@ import (
 
 	"github.com/siderolabs/sidero/sfyra/pkg/capi"
 	"github.com/siderolabs/sidero/sfyra/pkg/talos"
-	"github.com/siderolabs/sidero/sfyra/pkg/vm"
 )
 
 const (
@@ -21,9 +20,9 @@ const (
 )
 
 // TestWorkloadCluster deploys and destroys the workload cluster via CAPI.
-func TestWorkloadCluster(ctx context.Context, metalClient client.Client, cluster talos.Cluster, vmSet *vm.Set, capiManager *capi.Manager, talosRelease, kubernetesVersion string) TestFunc {
+func TestWorkloadCluster(ctx context.Context, metalClient client.Client, cluster talos.Cluster, capiManager *capi.Manager, talosRelease, kubernetesVersion string) TestFunc {
 	return func(t *testing.T) {
-		loadbalancer := deployCluster(ctx, t, metalClient, cluster, vmSet, capiManager, workloadClusterName, serverClassName, workloadClusterLBPort, 1, 0, talosRelease, kubernetesVersion)
+		loadbalancer := deployCluster(ctx, t, metalClient, cluster, capiManager, workloadClusterName, serverClassName, workloadClusterLBPort, 1, 0, talosRelease, kubernetesVersion)
 		defer loadbalancer.Close()
 
 		deleteCluster(ctx, t, metalClient, workloadClusterName)
diff --git a/sfyra/pkg/vm/set.go b/sfyra/pkg/vm/set.go
deleted file mode 100644
index 7dbbc5517..000000000
--- a/sfyra/pkg/vm/set.go
+++ /dev/null
@@ -1,201 +0,0 @@
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this
-// file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package vm
-
-import (
-	"context"
-	"fmt"
-	"net/netip"
-	"path/filepath"
-
-	talosnet "github.com/siderolabs/net"
-	clientconfig "github.com/siderolabs/talos/pkg/machinery/client/config"
-	"github.com/siderolabs/talos/pkg/machinery/config/machine"
-	"github.com/siderolabs/talos/pkg/provision"
-	"github.com/siderolabs/talos/pkg/provision/providers/qemu"
-
-	"github.com/siderolabs/sidero/sfyra/pkg/constants"
-)
-
-// Set is a number of PXE-booted VMs.
-type Set struct {
-	provisioner provision.Provisioner
-	cluster     provision.Cluster
-	options     Options
-	stateDir    string
-	cniDir      string
-	bridgeIP    netip.Addr
-}
-
-// Options configure new VM set.
-type Options struct {
-	Name       string
-	Nodes      int
-	BootSource netip.Addr
-	CIDR       string
-
-	TalosctlPath string
-	CNIBundleURL string
-
-	MemMB  int64
-	CPUs   int64
-	DiskGB int64
-
-	DefaultBootOrder string
-}
-
-// NewSet creates new VM set.
-func NewSet(ctx context.Context, options Options) (*Set, error) {
-	set := &Set{
-		options: options,
-	}
-
-	var err error
-	set.provisioner, err = qemu.NewProvisioner(ctx)
-
-	if err != nil {
-		return nil, err
-	}
-
-	return set, nil
-}
-
-// Setup the VM set.
-func (set *Set) Setup(ctx context.Context) error {
-	var err error
-
-	defaultStateDir, err := clientconfig.GetTalosDirectory()
-	if err != nil {
-		return err
-	}
-
-	set.stateDir = filepath.Join(defaultStateDir, "clusters")
-	set.cniDir = filepath.Join(defaultStateDir, "cni")
-
-	fmt.Printf("VM set state directory: %s, name: %s\n", set.stateDir, set.options.Name)
-
-	if err = set.findExisting(ctx); err != nil {
-		fmt.Printf("VM set not found: %s, creating new one\n", err)
-
-		return set.create(ctx)
-	}
-
-	return nil
-}
-
-func (set *Set) findExisting(ctx context.Context) error {
-	var err error
-
-	set.cluster, err = set.provisioner.Reflect(ctx, set.options.Name, set.stateDir)
-	if err != nil {
-		return err
-	}
-
-	cidr, err := netip.ParsePrefix(set.options.CIDR)
-	if err != nil {
-		return err
-	}
-
-	set.bridgeIP, err = talosnet.NthIPInNetwork(cidr, 1)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (set *Set) create(ctx context.Context) error {
-	cidr, err := netip.ParsePrefix(set.options.CIDR)
-	if err != nil {
-		return err
-	}
-
-	set.bridgeIP, err = talosnet.NthIPInNetwork(cidr, 1)
-	if err != nil {
-		return err
-	}
-
-	ips := make([]netip.Addr, 1+set.options.Nodes)
-
-	for i := range ips {
-		ips[i], err = talosnet.NthIPInNetwork(cidr, i+2)
-		if err != nil {
-			return err
-		}
-	}
-
-	request := provision.ClusterRequest{
-		Name: set.options.Name,
-
-		Network: provision.NetworkRequest{
-			Name:         set.options.Name,
-			CIDRs:        []netip.Prefix{cidr},
-			GatewayAddrs: []netip.Addr{set.bridgeIP},
-			MTU:          constants.MTU,
-			Nameservers:  constants.Nameservers,
-			CNI: provision.CNIConfig{
-				BinPath:  []string{filepath.Join(set.cniDir, "bin")},
-				ConfDir:  filepath.Join(set.cniDir, "conf.d"),
-				CacheDir: filepath.Join(set.cniDir, "cache"),
-
-				BundleURL: set.options.CNIBundleURL,
-			},
-		},
-
-		SelfExecutable: set.options.TalosctlPath,
-		StateDirectory: set.stateDir,
-	}
-
-	for i := 0; i < set.options.Nodes; i++ {
-		request.Nodes = append(request.Nodes,
-			provision.NodeRequest{
-				Name:     fmt.Sprintf("pxe-%d", i),
-				Type:     machine.TypeUnknown,
-				IPs:      []netip.Addr{ips[i+1]},
-				Memory:   set.options.MemMB * 1024 * 1024,
-				NanoCPUs: set.options.CPUs * 1000 * 1000 * 1000,
-				Disks: []*provision.Disk{
-					{
-						Size: uint64(set.options.DiskGB) * 1024 * 1024 * 1024,
-					},
-				},
-				PXEBooted:           true,
-				TFTPServer:          set.options.BootSource.String(),
-				IPXEBootFilename:    "undionly.kpxe",
-				SkipInjectingConfig: true,
-				DefaultBootOrder:    set.options.DefaultBootOrder,
-			})
-	}
-
-	set.cluster, err = set.provisioner.Create(ctx, request)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// TearDown the set of VMs.
-func (set *Set) TearDown(ctx context.Context) error {
-	if set.cluster != nil {
-		if err := set.provisioner.Destroy(ctx, set.cluster); err != nil {
-			return err
-		}
-
-		set.cluster = nil
-	}
-
-	return nil
-}
-
-// BridgeIP returns the IP of the gateway (bridge).
-func (set *Set) BridgeIP() netip.Addr {
-	return set.bridgeIP
-}
-
-// Nodes return information about PXE VMs.
-func (set *Set) Nodes() []provision.NodeInfo {
-	return set.cluster.Info().ExtraNodes
-}
diff --git a/website/content/v0.6/Overview/installation.md b/website/content/v0.6/Overview/installation.md
index 82d5e3f3c..a5b929d68 100644
--- a/website/content/v0.6/Overview/installation.md
+++ b/website/content/v0.6/Overview/installation.md
@@ -30,6 +30,7 @@ variables or as variables in the `clusterctl` configuration:
 
-Sidero provides three endpoints which should be made available to the infrastructure:
+Sidero provides four endpoints which should be made available to the infrastructure:
 
+- UDP port 67 for the proxy DHCP service (providing PXE boot information to the nodes, but no IPAM)
 - TCP port 8081 which provides combined iPXE, metadata and gRPC service (external endpoint should be specified as `SIDERO_CONTROLLER_MANAGER_API_ENDPOINT` and `SIDERO_CONTROLLER_MANAGER_API_PORT`)
 - UDP port 69 for the TFTP service (DHCP server should point the nodes to PXE boot from that IP)
 - UDP port 51821 for the SideroLink Wireguard service (external endpoint should be specified as `SIDERO_CONTROLLER_MANAGER_SIDEROLINK_ENDPOINT` and `SIDERO_CONTROLLER_MANAGER_SIDEROLINK_PORT`)
diff --git a/website/content/v0.6/Overview/whatsnew.md b/website/content/v0.6/Overview/whatsnew.md
index b065277d4..d45ea8490 100644
--- a/website/content/v0.6/Overview/whatsnew.md
+++ b/website/content/v0.6/Overview/whatsnew.md
@@ -4,62 +4,35 @@ weight: 15
 title: What's New
 ---
 
-### Cluster API v1.x (v1beta1)
+## New API Version for `metal.sidero.dev` Resources
 
-This release of Sidero brings compatibility with CAPI v1.x (v1beta1).
+Resources under `metal.sidero.dev` (`Server`, `ServerClass`, `Environment`) now have a new version `v1alpha2`.
+The old version `v1alpha1` is still supported, but it is recommended to update templates to use the new resource version.
 
-### Cluster Template
+### `Server` Changes
 
-Sidero ships with new cluster template without `init` nodes.
-This template is only compatible with Talos >= 0.14 (it requires SideroLink feature which was introduced in Talos 0.14).
+Hardware information was restructured and extended compared with `v1alpha1`:
 
-On upgrade, Sidero supports clusters running Talos < 0.14 if they were created before the upgrade.
-Use [legacy template](https://github.com/talos-systems/sidero/blob/release-0.4/templates/cluster-template.yaml) to deploy clusters with Talos < 0.14.
+* `.spec.systemInformation` -> `.spec.hardware.system`
+* `.spec.cpu` -> `.spec.hardware.compute.processors[]`
 
-### New `MetalMachines` Conditions
+### `ServerClass` Changes
 
-New set of conditions is now available which can simplify cluster troubleshooting:
+* `.spec.qualifiers.systemInformation` -> `.spec.qualifiers.system`
+* `.spec.qualifiers.cpu` -> `.spec.qualifiers.hardware.compute.processors[]`
 
-- `TalosConfigLoaded` is set to false when the config load has failed.
-- `TalosConfigValidated` is set to false when the config validation
-fails on the node.
-- `TalosInstalled` is set to true/false when talos installer finishes.
+## Metadata Server
 
-Requires Talos >= v0.14.
+The Sidero Metadata Server no longer depends on the version of the Talos machinery library it is built with.
+Sidero should be able to process machine configs for future versions of Talos.
 
-### Machine Addresses
+## Sidero Agent
 
-Sidero now populates `MetalMachine` addresses with the ones discovered from Siderolink server events.
-Which is then propagated to CAPI `Machine` resources.
+The Sidero Agent now runs a DHCP client in userland, on the link that was used to PXE boot the machine.
+This allows the Sidero Agent to run on machines with several autoconfigured network interfaces, when only one of them is used for the management network.
 
-Requires Talos >= v0.14.
+## DHCP Proxy
 
-### SideroLink
-
-Sidero now connects to all servers using SideroLink (available only with Talos >= 0.14).
-This enables streaming of kernel logs and events back to Sidero.
-
-All server logs can now be viewed by getting logs of one of the container of the `sidero-controller-manager`:
-
-```bash
-kubectl logs -f -n sidero-system deployment/sidero-controller-manager -c serverlogs
-```
-
-Events:
-
-```bash
-kubectl logs -f -n sidero-system deployment/sidero-controller-manager -c serverevents
-```
-
-### iPXE Boot From Disk Method
-
-iPXE boot from disk method can now be set not only on the global level, but also in the Server and ServerClass specs.
-
-### IPMI PXE Method
-
-IPMI PXE method (UEFI, BIOS) can now be configured with `SIDERO_CONTROLLER_MANAGER_IPMI_PXE_METHOD` while installing Sidero.
-
-### Retry PXE Boot
-
-Sidero server controller now keeps track of Talos installation progress.
-Now the node will be PXE booted until Talos installation succeeds.
+The Sidero Controller Manager now includes a DHCP proxy which augments DHCP responses with additional PXE boot options.
+When enabled, the DHCP server in the environment handles only IP allocation and network configuration, while the DHCP proxy
+automatically provides PXE boot information based on the client architecture and boot method.
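
The installation doc change above lists the `SIDERO_CONTROLLER_MANAGER_*` variables that may be supplied via the environment or the `clusterctl` configuration. A minimal sketch of such a configuration follows, assuming the default `~/.cluster-api/clusterctl.yaml` location; the address and port values are illustrative placeholders, not defaults shipped with Sidero:

```yaml
# ~/.cluster-api/clusterctl.yaml — hypothetical values, adjust for your network
SIDERO_CONTROLLER_MANAGER_API_ENDPOINT: "192.168.1.150"   # IP reachable by the booting nodes
SIDERO_CONTROLLER_MANAGER_API_PORT: "8081"                # combined iPXE, metadata and gRPC service
SIDERO_CONTROLLER_MANAGER_SIDEROLINK_ENDPOINT: "192.168.1.150"
SIDERO_CONTROLLER_MANAGER_SIDEROLINK_PORT: "51821"        # SideroLink Wireguard service
```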
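To make the `Server` field moves in the what's-new notes concrete, here is a minimal sketch of a `v1alpha2` resource. Only the `.spec.hardware.system` and `.spec.hardware.compute.processors[]` paths are taken from the notes above; the name and leaf fields are illustrative assumptions, not a complete schema:

```yaml
apiVersion: metal.sidero.dev/v1alpha2
kind: Server
metadata:
  name: 00000000-0000-0000-0000-000000000001   # example server UUID
spec:
  hardware:
    system:          # was .spec.systemInformation in v1alpha1
      manufacturer: Dell Inc.
    compute:
      processors:    # was .spec.cpu in v1alpha1
        - manufacturer: Intel
          productName: Intel(R) Xeon(R) CPU E5-2630 v3 @ 2.40GHz
```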
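Similarly, a hedged sketch of `ServerClass` qualifiers following only the renamed paths listed in the notes; the matcher values and surrounding structure are assumptions based on the `v1alpha1` shape:

```yaml
apiVersion: metal.sidero.dev/v1alpha2
kind: ServerClass
metadata:
  name: example-servers
spec:
  qualifiers:
    system:          # was .spec.qualifiers.systemInformation in v1alpha1
      manufacturer: Dell Inc.
    hardware:
      compute:
        processors:  # was .spec.qualifiers.cpu in v1alpha1
          - productName: Intel(R) Xeon(R) CPU E5-2630 v3 @ 2.40GHz
```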