diff --git a/pkg/controllers/deprovisioning/helpers.go b/pkg/controllers/deprovisioning/helpers.go index 8ddff93df0..d776fd1fca 100644 --- a/pkg/controllers/deprovisioning/helpers.go +++ b/pkg/controllers/deprovisioning/helpers.go @@ -39,7 +39,7 @@ import ( //nolint:gocyclo func simulateScheduling(ctx context.Context, kubeClient client.Client, cluster *state.Cluster, provisioner *provisioning.Provisioner, - candidateNodes ...CandidateNode) (newNodes []*pscheduling.Node, allPodsScheduled bool, err error) { + candidateNodes ...CandidateNode) (newNodes []*pscheduling.Machine, allPodsScheduled bool, err error) { candidateNodeNames := sets.NewString(lo.Map(candidateNodes, func(t CandidateNode, i int) string { return t.Name })...) nodes := cluster.Nodes() @@ -198,7 +198,7 @@ func candidateNodes(ctx context.Context, cluster *state.Cluster, kubeClient clie } // Skip nodes that aren't initialized - // This also means that the real Node doesn't exist for it + // This also means that the real Machine doesn't exist for it if !n.Initialized() { return true } @@ -313,7 +313,7 @@ func clamp(min, val, max float64) float64 { return val } -// mapNodes maps from a list of *v1.Node to candidateNode +// mapNodes maps from a list of *v1.Machine to candidateNode func mapNodes(nodes []*v1.Node, candidateNodes []CandidateNode) []CandidateNode { verifyNodeNames := sets.NewString(lo.Map(nodes, func(t *v1.Node, i int) string { return t.Name })...) var ret []CandidateNode diff --git a/pkg/controllers/deprovisioning/multinodeconsolidation.go b/pkg/controllers/deprovisioning/multinodeconsolidation.go index f707155387..7f06c9fadc 100644 --- a/pkg/controllers/deprovisioning/multinodeconsolidation.go +++ b/pkg/controllers/deprovisioning/multinodeconsolidation.go @@ -129,7 +129,7 @@ func (m *MultiNodeConsolidation) firstNNodeConsolidationOption(ctx context.Conte // This code sees that t3a.small is the cheapest type in both lists and filters it and anything more expensive out // leaving the valid consolidation: // nodes=[t3a.2xlarge, t3a.2xlarge, t3a.small] -> 1 of t3a.nano -func filterOutSameType(newNode *scheduling.Node, consolidate []CandidateNode) []*cloudprovider.InstanceType { +func filterOutSameType(newNode *scheduling.Machine, consolidate []CandidateNode) []*cloudprovider.InstanceType { existingInstanceTypes := sets.NewString() nodePricesByInstanceType := map[string]float64{} diff --git a/pkg/controllers/deprovisioning/suite_test.go b/pkg/controllers/deprovisioning/suite_test.go index 7d12e30c1d..efbdcb498f 100644 --- a/pkg/controllers/deprovisioning/suite_test.go +++ b/pkg/controllers/deprovisioning/suite_test.go @@ -32,6 +32,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/record" clock "k8s.io/utils/clock/testing" . 
"knative.dev/pkg/logging/testing" "knative.dev/pkg/ptr" @@ -47,6 +48,7 @@ import ( "github.com/aws/karpenter-core/pkg/controllers/provisioning" "github.com/aws/karpenter-core/pkg/controllers/state" "github.com/aws/karpenter-core/pkg/controllers/state/informer" + "github.com/aws/karpenter-core/pkg/events" "github.com/aws/karpenter-core/pkg/operator/controller" "github.com/aws/karpenter-core/pkg/operator/scheme" "github.com/aws/karpenter-core/pkg/test" @@ -60,7 +62,6 @@ var deprovisioningController *deprovisioning.Controller var provisioningController controller.Controller var provisioner *provisioning.Provisioner var cloudProvider *fake.CloudProvider -var recorder *test.EventRecorder var nodeStateController controller.Controller var fakeClock *clock.FakeClock var onDemandInstances []*cloudprovider.InstanceType @@ -82,9 +83,8 @@ var _ = BeforeSuite(func() { fakeClock = clock.NewFakeClock(time.Now()) cluster = state.NewCluster(fakeClock, env.Client, cloudProvider) nodeStateController = informer.NewNodeController(env.Client, cluster) - recorder = test.NewEventRecorder() - provisioner = provisioning.NewProvisioner(ctx, env.Client, env.KubernetesInterface.CoreV1(), recorder, cloudProvider, cluster) - provisioningController = provisioning.NewController(env.Client, provisioner, recorder) + provisioner = provisioning.NewProvisioner(ctx, env.Client, env.KubernetesInterface.CoreV1(), events.NewRecorder(&record.FakeRecorder{}), cloudProvider, cluster) + provisioningController = provisioning.NewController(env.Client, provisioner, events.NewRecorder(&record.FakeRecorder{})) }) var _ = AfterSuite(func() { @@ -122,13 +122,12 @@ var _ = BeforeEach(func() { mostExpensiveInstance = onDemandInstances[len(onDemandInstances)-1] mostExpensiveOffering = mostExpensiveInstance.Offerings[0] - recorder.Reset() // ensure any waiters on our clock are allowed to proceed before resetting our clock time for fakeClock.HasWaiters() { fakeClock.Step(1 * time.Minute) } fakeClock.SetTime(time.Now()) - deprovisioningController = deprovisioning.NewController(fakeClock, env.Client, provisioner, cloudProvider, recorder, cluster) + deprovisioningController = deprovisioning.NewController(fakeClock, env.Client, provisioner, cloudProvider, events.NewRecorder(&record.FakeRecorder{}), cluster) // Reset Feature Flags to test defaults ctx = settings.ToContext(ctx, test.Settings(test.SettingsOptions{DriftEnabled: true})) }) @@ -1793,11 +1792,12 @@ var _ = Describe("Parallelization", func() { }, time.Second*10).Should(Succeed()) wg.Wait() // Add a new pending pod that should schedule while node is not yet deleted - pods := ExpectProvisionedNoBinding(ctx, env.Client, provisioningController, provisioner, test.UnschedulablePod()) + pod = test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, provisioner, pod) nodes := &v1.NodeList{} Expect(env.Client.List(ctx, nodes)).To(Succeed()) Expect(len(nodes.Items)).To(Equal(2)) - Expect(pods[0].Spec.NodeName).NotTo(Equal(node.Name)) + ExpectScheduled(ctx, env.Client, pod) }) It("should not consolidate a node that is launched for pods on a deleting node", func() { labels := map[string]string{ @@ -1838,7 +1838,7 @@ var _ = Describe("Parallelization", func() { pods = append(pods, pod) } ExpectApplied(ctx, env.Client, rs, prov) - ExpectProvisionedNoBinding(ctx, env.Client, provisioningController, provisioner, lo.Map(pods, func(p *v1.Pod, _ int) *v1.Pod { return p.DeepCopy() })...) 
+ ExpectProvisionedNoBinding(ctx, env.Client, provisioner, lo.Map(pods, func(p *v1.Pod, _ int) *v1.Pod { return p.DeepCopy() })...) nodeList := &v1.NodeList{} Expect(env.Client.List(ctx, nodeList)).To(Succeed()) @@ -1847,13 +1847,10 @@ var _ = Describe("Parallelization", func() { // Update cluster state with new node ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(&nodeList.Items[0])) - // Reset the bindings so we can re-record bindings - recorder.ResetBindings() - // Mark the node for deletion and re-trigger reconciliation oldNodeName := nodeList.Items[0].Name cluster.MarkForDeletion(nodeList.Items[0].Name) - ExpectProvisionedNoBinding(ctx, env.Client, provisioningController, provisioner, lo.Map(pods, func(p *v1.Pod, _ int) *v1.Pod { return p.DeepCopy() })...) + ExpectProvisionedNoBinding(ctx, env.Client, provisioner, lo.Map(pods, func(p *v1.Pod, _ int) *v1.Pod { return p.DeepCopy() })...) // Make sure that the cluster state is aware of the current node state Expect(env.Client.List(ctx, nodeList)).To(Succeed()) diff --git a/pkg/controllers/deprovisioning/types.go b/pkg/controllers/deprovisioning/types.go index a0436af205..45ae8b255c 100644 --- a/pkg/controllers/deprovisioning/types.go +++ b/pkg/controllers/deprovisioning/types.go @@ -63,7 +63,7 @@ func (a action) String() string { type Command struct { nodesToRemove []*v1.Node action action - replacementNodes []*scheduling.Node + replacementNodes []*scheduling.Machine } func (o Command) String() string { diff --git a/pkg/controllers/inflightchecks/suite_test.go b/pkg/controllers/inflightchecks/suite_test.go index 724751b177..eaff4b577a 100644 --- a/pkg/controllers/inflightchecks/suite_test.go +++ b/pkg/controllers/inflightchecks/suite_test.go @@ -17,6 +17,7 @@ package inflightchecks_test import ( "context" "fmt" + "sync" "testing" "time" @@ -47,7 +48,7 @@ var inflightController controller.Controller var env *test.Environment var fakeClock *clock.FakeClock var cp *fake.CloudProvider -var recorder *test.EventRecorder +var recorder *FakeEventRecorder func TestAPIs(t *testing.T) { ctx = TestContextWithLogger(t) @@ -60,7 +61,7 @@ var _ = BeforeSuite(func() { env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...)) ctx = settings.ToContext(ctx, test.Settings()) cp = &fake.CloudProvider{} - recorder = test.NewEventRecorder() + recorder = NewFakeEventRecorder() inflightController = inflightchecks.NewController(fakeClock, env.Client, recorder, cp) }) @@ -183,6 +184,49 @@ var _ = Describe("Controller", func() { }) }) +var _ events.Recorder = (*FakeEventRecorder)(nil) + +// FakeEventRecorder is a mock event recorder that is used to facilitate testing. 
+type FakeEventRecorder struct { + mu sync.RWMutex + calls map[string]int + events []events.Event +} + +func NewFakeEventRecorder() *FakeEventRecorder { + return &FakeEventRecorder{ + calls: map[string]int{}, + } +} + +func (e *FakeEventRecorder) Publish(evt events.Event) { + e.mu.Lock() + defer e.mu.Unlock() + e.events = append(e.events, evt) + e.calls[evt.Reason]++ +} + +func (e *FakeEventRecorder) Calls(reason string) int { + e.mu.RLock() + defer e.mu.RUnlock() + return e.calls[reason] +} + +func (e *FakeEventRecorder) Reset() { + e.mu.Lock() + defer e.mu.Unlock() + e.events = nil + e.calls = map[string]int{} +} + +func (e *FakeEventRecorder) ForEachEvent(f func(evt events.Event)) { + e.mu.RLock() + defer e.mu.RUnlock() + for _, e := range e.events { + f(e) + } +} + func ExpectDetectedEvent(msg string) { foundEvent := false recorder.ForEachEvent(func(evt events.Event) { diff --git a/pkg/controllers/machine/registration.go b/pkg/controllers/machine/registration.go index b4540cad92..585916d905 100644 --- a/pkg/controllers/machine/registration.go +++ b/pkg/controllers/machine/registration.go @@ -43,7 +43,7 @@ func (r *Registration) Reconcile(ctx context.Context, machine *v1alpha5.Machine) node, err := nodeForMachine(ctx, r.kubeClient, machine) if err != nil { if IsNodeNotFoundError(err) { - machine.StatusConditions().MarkFalse(v1alpha5.MachineRegistered, "NodeNotFound", "Node not registered with cluster") + machine.StatusConditions().MarkFalse(v1alpha5.MachineRegistered, "NodeNotFound", "Machine not registered with cluster") return reconcile.Result{RequeueAfter: registrationTTL}, nil // Requeue later to check up to the registration timeout } if IsDuplicateNodeError(err) { @@ -66,7 +66,7 @@ func (r *Registration) syncNode(ctx context.Context, machine *v1alpha5.Machine, node.Labels = lo.Assign(node.Labels, machine.Labels) node.Annotations = lo.Assign(node.Annotations, machine.Annotations) - // Sync all taints inside of Machine into the Node taints + // Sync all taints inside of Machine into the Machine taints node.Spec.Taints = scheduling.Taints(node.Spec.Taints).Merge(machine.Spec.Taints) if !machine.StatusConditions().GetCondition(v1alpha5.MachineRegistered).IsTrue() { node.Spec.Taints = scheduling.Taints(node.Spec.Taints).Merge(machine.Spec.StartupTaints) diff --git a/pkg/controllers/machine/suite_test.go b/pkg/controllers/machine/suite_test.go index 5b55df5fa9..eff87c041b 100644 --- a/pkg/controllers/machine/suite_test.go +++ b/pkg/controllers/machine/suite_test.go @@ -25,6 +25,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" clock "k8s.io/utils/clock/testing" . "knative.dev/pkg/logging/testing" "sigs.k8s.io/controller-runtime/pkg/cache" @@ -36,6 +37,7 @@ import ( "github.com/aws/karpenter-core/pkg/cloudprovider/fake" "github.com/aws/karpenter-core/pkg/controllers/machine" "github.com/aws/karpenter-core/pkg/controllers/machine/terminator" + "github.com/aws/karpenter-core/pkg/events" "github.com/aws/karpenter-core/pkg/operator/controller" "github.com/aws/karpenter-core/pkg/operator/scheme" . 
"github.com/aws/karpenter-core/pkg/test/expectations" @@ -65,9 +67,8 @@ var _ = BeforeSuite(func() { ctx = settings.ToContext(ctx, test.Settings()) cloudProvider = fake.NewCloudProvider() - recorder := test.NewEventRecorder() - terminator := terminator.NewTerminator(fakeClock, env.Client, cloudProvider, terminator.NewEvictionQueue(ctx, env.KubernetesInterface.CoreV1(), recorder)) - machineController = machine.NewController(fakeClock, env.Client, cloudProvider, terminator, recorder) + terminator := terminator.NewTerminator(fakeClock, env.Client, cloudProvider, terminator.NewEvictionQueue(ctx, env.KubernetesInterface.CoreV1(), events.NewRecorder(&record.FakeRecorder{}))) + machineController = machine.NewController(fakeClock, env.Client, cloudProvider, terminator, events.NewRecorder(&record.FakeRecorder{})) }) var _ = AfterSuite(func() { diff --git a/pkg/controllers/provisioning/provisioner.go b/pkg/controllers/provisioning/provisioner.go index 7c85bb14a9..9189549ef2 100644 --- a/pkg/controllers/provisioning/provisioner.go +++ b/pkg/controllers/provisioning/provisioner.go @@ -108,48 +108,18 @@ func (p *Provisioner) Reconcile(ctx context.Context, _ reconcile.Request) (resul return reconcile.Result{}, nil } - // We collect the nodes with their used capacities before we get the list of pending pods. This ensures that - // the node capacities we schedule against are always >= what the actual capacity is at any given instance. This - // prevents over-provisioning at the cost of potentially under-provisioning which will self-heal during the next - // scheduling loop when we launch a new node. When this order is reversed, our node capacity may be reduced by pods - // that have bound which we then provision new un-needed capacity for. - // ------- - // We don't consider the nodes that are MarkedForDeletion since this capacity shouldn't be considered - // as persistent capacity for the cluster (since it will soon be removed). Additionally, we are scheduling for - // the pods that are on these nodes so the MarkedForDeletion node capacity can't be considered. - nodes := p.cluster.Nodes() - - // Get pods, exit if nothing to do - pendingPods, err := p.GetPendingPods(ctx) - if err != nil { - return reconcile.Result{}, err - } - // Get pods from nodes that are preparing for deletion - // We do this after getting the pending pods so that we undershoot if pods are - // actively migrating from a node that is being deleted - // NOTE: The assumption is that these nodes are cordoned and no additional pods will schedule to them - deletingNodePods, err := nodes.Deleting().Pods(ctx, p.kubeClient) - if err != nil { - return reconcile.Result{}, err - } - pods := append(pendingPods, deletingNodePods...) 
- if len(pods) == 0 { - return reconcile.Result{}, nil - } - // Schedule pods to potential nodes, exit if nothing to do - machines, err := p.schedule(ctx, pods, nodes.Active()) + machines, _, err := p.Schedule(ctx) if err != nil { return reconcile.Result{}, err } if len(machines) == 0 { return reconcile.Result{}, nil } - - nodeNames, err := p.LaunchMachines(ctx, machines, RecordPodNomination) + machineNames, err := p.LaunchMachines(ctx, machines, RecordPodNomination) // Any successfully created node is going to have the nodeName value filled in the slice - successfullyCreatedNodeCount := lo.CountBy(nodeNames, func(name string) bool { return name != "" }) + successfullyCreatedNodeCount := lo.CountBy(machineNames, func(name string) bool { return name != "" }) metrics.NodesCreatedCounter.WithLabelValues(metrics.ProvisioningReason).Add(float64(successfullyCreatedNodeCount)) return reconcile.Result{}, err @@ -157,7 +127,7 @@ func (p *Provisioner) Reconcile(ctx context.Context, _ reconcile.Request) (resul // LaunchMachines launches nodes passed into the function in parallel. It returns a slice of the successfully created node // names as well as a multierr of any errors that occurred while launching nodes -func (p *Provisioner) LaunchMachines(ctx context.Context, machines []*scheduler.Node, opts ...functional.Option[LaunchOptions]) ([]string, error) { +func (p *Provisioner) LaunchMachines(ctx context.Context, machines []*scheduler.Machine, opts ...functional.Option[LaunchOptions]) ([]string, error) { // Launch capacity and bind pods errs := make([]error, len(machines)) machineNames := make([]string, len(machines)) @@ -167,7 +137,7 @@ func (p *Provisioner) LaunchMachines(ctx context.Context, machines []*scheduler. // register the provisioner on the context so we can pull it off for tagging purposes // TODO: rethink this, maybe just pass the provisioner down instead of hiding it in the context? ctx = injection.WithNamespacedName(ctx, types.NamespacedName{Name: machines[i].Labels[v1alpha5.ProvisionerNameLabelKey]}) - if machineName, err := p.launch(ctx, machines[i], opts...); err != nil { + if machineName, err := p.Launch(ctx, machines[i], opts...); err != nil { errs[i] = fmt.Errorf("launching machine, %w", err) } else { machineNames[i] = machineName @@ -224,7 +194,7 @@ func (p *Provisioner) consolidationWarnings(ctx context.Context, po v1.Pod) { } } -// nolint: gocyclo +//nolint:gocyclo func (p *Provisioner) NewScheduler(ctx context.Context, pods []*v1.Pod, stateNodes []*state.Node, opts scheduler.SchedulerOptions) (*scheduler.Scheduler, error) { // Build node templates var machines []*scheduler.MachineTemplate @@ -293,20 +263,45 @@ func (p *Provisioner) NewScheduler(ctx context.Context, pods []*v1.Pod, stateNod return scheduler.NewScheduler(ctx, p.kubeClient, machines, provisionerList.Items, p.cluster, stateNodes, topology, instanceTypes, daemonSetPods, p.recorder, opts), nil } -func (p *Provisioner) schedule(ctx context.Context, pods []*v1.Pod, stateNodes []*state.Node) ([]*scheduler.Node, error) { +func (p *Provisioner) Schedule(ctx context.Context) ([]*scheduler.Machine, []*scheduler.ExistingNode, error) { defer metrics.Measure(schedulingDuration.WithLabelValues(injection.GetNamespacedName(ctx).Name))() - scheduler, err := p.NewScheduler(ctx, pods, stateNodes, scheduler.SchedulerOptions{}) + // We collect the nodes with their used capacities before we get the list of pending pods. 
This ensures that + // the node capacities we schedule against are always >= what the actual capacity is at any given instance. This + // prevents over-provisioning at the cost of potentially under-provisioning which will self-heal during the next + // scheduling loop when we Launch a new node. When this order is reversed, our node capacity may be reduced by pods + // that have bound which we then provision new un-needed capacity for. + // ------- + // We don't consider the nodes that are MarkedForDeletion since this capacity shouldn't be considered + // as persistent capacity for the cluster (since it will soon be removed). Additionally, we are scheduling for + // the pods that are on these nodes so the MarkedForDeletion node capacity can't be considered. + nodes := p.cluster.Nodes() + + // Get pods, exit if nothing to do + pendingPods, err := p.GetPendingPods(ctx) if err != nil { - return nil, fmt.Errorf("creating scheduler, %w", err) + return nil, nil, err } - - // don't care about inflight scheduling results in this context - nodes, _, err := scheduler.Solve(ctx, pods) - return nodes, err + // Get pods from nodes that are preparing for deletion + // We do this after getting the pending pods so that we undershoot if pods are + // actively migrating from a node that is being deleted + // NOTE: The assumption is that these nodes are cordoned and no additional pods will schedule to them + deletingNodePods, err := nodes.Deleting().Pods(ctx, p.kubeClient) + if err != nil { + return nil, nil, err + } + pods := append(pendingPods, deletingNodePods...) + if len(pods) == 0 { + return nil, nil, nil + } + scheduler, err := p.NewScheduler(ctx, pods, nodes.Active(), scheduler.SchedulerOptions{}) + if err != nil { + return nil, nil, fmt.Errorf("creating scheduler, %w", err) + } + return scheduler.Solve(ctx, pods) } -func (p *Provisioner) launch(ctx context.Context, machine *scheduler.Node, opts ...functional.Option[LaunchOptions]) (string, error) { +func (p *Provisioner) Launch(ctx context.Context, machine *scheduler.Machine, opts ...functional.Option[LaunchOptions]) (string, error) { // Check limits latest := &v1alpha5.Provisioner{} if err := p.kubeClient.Get(ctx, types.NamespacedName{Name: machine.ProvisionerName}, latest); err != nil { diff --git a/pkg/controllers/provisioning/scheduling/instance_selection_test.go b/pkg/controllers/provisioning/scheduling/instance_selection_test.go index ea2a44c44e..3c7598828b 100644 --- a/pkg/controllers/provisioning/scheduling/instance_selection_test.go +++ b/pkg/controllers/provisioning/scheduling/instance_selection_test.go @@ -71,32 +71,35 @@ var _ = Describe("Instance Type Selection", func() { // passed to the cloud provider is unique. 
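The Reconcile refactor above exposes the scheduling and launch steps as the exported Schedule and LaunchMachines methods, while keeping the capacity-snapshot-before-pending-pods ordering inside Schedule. A short sketch of how a caller might drive the exported flow, assuming the signatures shown in this diff; the provisionOnce wrapper, its package name, and the logging are illustrative only:

package provisionutil

import (
	"context"
	"log"

	"github.com/samber/lo"

	"github.com/aws/karpenter-core/pkg/controllers/provisioning"
)

// provisionOnce mirrors the reconcile flow above: Schedule snapshots in-flight
// cluster capacity, then gathers pending pods plus pods on deleting nodes;
// LaunchMachines then creates capacity for the resulting machines.
func provisionOnce(ctx context.Context, p *provisioning.Provisioner) error {
	machines, _, err := p.Schedule(ctx) // existing-node results are ignored here
	if err != nil {
		return err
	}
	if len(machines) == 0 {
		return nil // nothing to provision
	}
	names, err := p.LaunchMachines(ctx, machines, provisioning.RecordPodNomination)
	log.Printf("launched %d machines", lo.CountBy(names, func(n string) bool { return n != "" }))
	return err
}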
It("should schedule on one of the cheapest instances", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node := ExpectScheduled(ctx, env.Client, pod[0]) + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) }) It("should schedule on one of the cheapest instances (pod arch = amd64)", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{{ Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{v1alpha5.ArchitectureAmd64}, - }}})) - node := ExpectScheduled(ctx, env.Client, pod[0]) + }}}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) // ensure that the entire list of instance types match the label ExpectInstancesWithLabel(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1.LabelArchStable, v1alpha5.ArchitectureAmd64) }) It("should schedule on one of the cheapest instances (pod arch = arm64)", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{{ Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{v1alpha5.ArchitectureArm64}, - }}})) - node := ExpectScheduled(ctx, env.Client, pod[0]) + }}}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) ExpectInstancesWithLabel(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1.LabelArchStable, v1alpha5.ArchitectureArm64) }) @@ -109,8 +112,9 @@ var _ = Describe("Instance Type Selection", func() { }, } ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node := ExpectScheduled(ctx, env.Client, pod[0]) + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) ExpectInstancesWithLabel(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1.LabelArchStable, v1alpha5.ArchitectureAmd64) }) @@ -123,8 +127,9 @@ var _ = Describe("Instance Type Selection", func() { }, } ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node := ExpectScheduled(ctx, env.Client, pod[0]) + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) ExpectInstancesWithLabel(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1.LabelArchStable, v1alpha5.ArchitectureArm64) }) @@ -137,20 +142,22 @@ var _ = Describe("Instance Type Selection", func() { }, } ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node := 
ExpectScheduled(ctx, env.Client, pod[0]) + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) ExpectInstancesWithLabel(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1.LabelOSStable, string(v1.Windows)) }) It("should schedule on one of the cheapest instances (pod os = windows)", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{{ Key: v1.LabelOSStable, Operator: v1.NodeSelectorOpIn, Values: []string{string(v1.Windows)}, - }}})) - node := ExpectScheduled(ctx, env.Client, pod[0]) + }}}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) ExpectInstancesWithLabel(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1.LabelOSStable, string(v1.Windows)) }) @@ -163,32 +170,35 @@ var _ = Describe("Instance Type Selection", func() { }, } ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node := ExpectScheduled(ctx, env.Client, pod[0]) + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) ExpectInstancesWithLabel(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1.LabelOSStable, string(v1.Windows)) }) It("should schedule on one of the cheapest instances (pod os = linux)", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{{ Key: v1.LabelOSStable, Operator: v1.NodeSelectorOpIn, Values: []string{string(v1.Linux)}, - }}})) - node := ExpectScheduled(ctx, env.Client, pod[0]) + }}}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) ExpectInstancesWithLabel(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1.LabelOSStable, string(v1.Linux)) }) It("should schedule on one of the cheapest instances (pod os = linux)", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{{ Key: v1.LabelOSStable, Operator: v1.NodeSelectorOpIn, Values: []string{string(v1.Linux)}, - }}})) - node := ExpectScheduled(ctx, env.Client, pod[0]) + }}}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) ExpectInstancesWithLabel(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1.LabelOSStable, string(v1.Linux)) }) @@ -201,20 +211,22 @@ var _ = Describe("Instance Type Selection", func() { }, } ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node := ExpectScheduled(ctx, env.Client, pod[0]) + pod := test.UnschedulablePod() + 
ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) ExpectInstancesWithLabel(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1.LabelTopologyZone, "test-zone-2") }) It("should schedule on one of the cheapest instances (pod zone = test-zone-2)", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{{ Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}, - }}})) - node := ExpectScheduled(ctx, env.Client, pod[0]) + }}}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) ExpectInstancesWithLabel(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1.LabelTopologyZone, "test-zone-2") }) @@ -227,20 +239,22 @@ var _ = Describe("Instance Type Selection", func() { }, } ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node := ExpectScheduled(ctx, env.Client, pod[0]) + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) ExpectInstancesWithLabel(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1alpha5.LabelCapacityType, v1alpha5.CapacityTypeSpot) }) It("should schedule on one of the cheapest instances (pod ct = spot)", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{{ Key: v1alpha5.LabelCapacityType, Operator: v1.NodeSelectorOpIn, Values: []string{v1alpha5.CapacityTypeSpot}, - }}})) - node := ExpectScheduled(ctx, env.Client, pod[0]) + }}}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) ExpectInstancesWithLabel(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1alpha5.LabelCapacityType, v1alpha5.CapacityTypeSpot) }) @@ -258,14 +272,15 @@ var _ = Describe("Instance Type Selection", func() { }, } ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node := ExpectScheduled(ctx, env.Client, pod[0]) + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) ExpectInstancesWithOffering(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1alpha5.CapacityTypeOnDemand, "test-zone-1") }) It("should schedule on one of the cheapest instances (pod ct = spot, pod zone = test-zone-1)", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{{ Key: v1alpha5.LabelCapacityType, Operator: v1.NodeSelectorOpIn, @@ -276,8 +291,9 @@ var _ = Describe("Instance Type Selection", func() { Operator: 
v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}, }, - }})) - node := ExpectScheduled(ctx, env.Client, pod[0]) + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) ExpectInstancesWithOffering(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1alpha5.CapacityTypeSpot, "test-zone-1") }) @@ -290,13 +306,14 @@ var _ = Describe("Instance Type Selection", func() { }, } ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{{ Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}, - }}})) - node := ExpectScheduled(ctx, env.Client, pod[0]) + }}}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) ExpectInstancesWithOffering(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1alpha5.CapacityTypeSpot, "test-zone-2") }) @@ -324,8 +341,9 @@ var _ = Describe("Instance Type Selection", func() { }, } ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node := ExpectScheduled(ctx, env.Client, pod[0]) + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) ExpectInstancesWithOffering(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1alpha5.CapacityTypeOnDemand, "test-zone-1") ExpectInstancesWithLabel(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1.LabelOSStable, string(v1.Windows)) @@ -345,7 +363,7 @@ var _ = Describe("Instance Type Selection", func() { }, } ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ { Key: v1alpha5.LabelCapacityType, @@ -357,8 +375,9 @@ var _ = Describe("Instance Type Selection", func() { Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}, }, - }})) - node := ExpectScheduled(ctx, env.Client, pod[0]) + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) ExpectInstancesWithOffering(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1alpha5.CapacityTypeSpot, "test-zone-2") ExpectInstancesWithLabel(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1.LabelOSStable, string(v1.Linux)) @@ -366,7 +385,7 @@ var _ = Describe("Instance Type Selection", func() { }) It("should schedule on one of the cheapest instances (pod ct = spot/test-zone-2/amd64/linux)", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ { Key: v1.LabelArchStable, @@ -388,8 +407,9 @@ var _ = Describe("Instance Type Selection", func() { Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}, }, - }})) - node := ExpectScheduled(ctx, env.Client, pod[0]) + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node 
:= ExpectScheduled(ctx, env.Client, pod) Expect(nodePrice(node)).To(Equal(minPrice)) ExpectInstancesWithOffering(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1alpha5.CapacityTypeSpot, "test-zone-2") ExpectInstancesWithLabel(supportedInstanceTypes(cloudProv.CreateCalls[0]), v1.LabelOSStable, string(v1.Linux)) @@ -403,15 +423,16 @@ var _ = Describe("Instance Type Selection", func() { Expect(len(cloudProv.InstanceTypes)).To(BeNumerically(">", 0)) ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ { Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{v1alpha5.ArchitectureArm64}, }, - }})) - ExpectNotScheduled(ctx, env.Client, pod[0]) + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + ExpectNotScheduled(ctx, env.Client, pod) Expect(cloudProv.CreateCalls).To(HaveLen(0)) }) It("should not schedule if no instance type matches selector (pod arch = arm zone=test-zone-2)", func() { @@ -426,7 +447,7 @@ var _ = Describe("Instance Type Selection", func() { }) Expect(len(cloudProv.InstanceTypes)).To(BeNumerically(">", 0)) ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ { Key: v1.LabelArchStable, @@ -438,8 +459,9 @@ var _ = Describe("Instance Type Selection", func() { Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}, }, - }})) - ExpectNotScheduled(ctx, env.Client, pod[0]) + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + ExpectNotScheduled(ctx, env.Client, pod) Expect(cloudProv.CreateCalls).To(HaveLen(0)) }) It("should not schedule if no instance type matches selector (prov arch = arm / pod zone=test-zone-2)", func() { @@ -462,15 +484,16 @@ var _ = Describe("Instance Type Selection", func() { } Expect(len(cloudProv.InstanceTypes)).To(BeNumerically(">", 0)) ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ { Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}, }, - }})) - ExpectNotScheduled(ctx, env.Client, pod[0]) + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + ExpectNotScheduled(ctx, env.Client, pod) Expect(cloudProv.CreateCalls).To(HaveLen(0)) }) It("should schedule on an instance with enough resources", func() { @@ -497,8 +520,10 @@ var _ = Describe("Instance Type Selection", func() { v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%0.1f", cpu)), v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%0.1fGi", mem)), }}} - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, - test.UnschedulablePod(opts), test.UnschedulablePod(opts), test.UnschedulablePod(opts)) + pods := []*v1.Pod{ + test.UnschedulablePod(opts), test.UnschedulablePod(opts), test.UnschedulablePod(opts), + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) 
nodeNames := sets.NewString() for _, p := range pods { node := ExpectScheduled(ctx, env.Client, p) @@ -563,8 +588,9 @@ var _ = Describe("Instance Type Selection", func() { } ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node := ExpectScheduled(ctx, env.Client, pod[0]) + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("test-instance1")) }) }) diff --git a/pkg/controllers/provisioning/scheduling/node.go b/pkg/controllers/provisioning/scheduling/machine.go similarity index 80% rename from pkg/controllers/provisioning/scheduling/node.go rename to pkg/controllers/provisioning/scheduling/machine.go index 432ff5abfd..8339f6a29c 100644 --- a/pkg/controllers/provisioning/scheduling/node.go +++ b/pkg/controllers/provisioning/scheduling/machine.go @@ -29,9 +29,9 @@ import ( "github.com/aws/karpenter-core/pkg/utils/resources" ) -// Node is a set of constraints, compatible pods, and possible instance types that could fulfill these constraints. This +// Machine is a set of constraints, compatible pods, and possible instance types that could fulfill these constraints. This // will be turned into one or more actual node instances within the cluster after bin packing. -type Node struct { +type Machine struct { MachineTemplate Pods []*v1.Pod @@ -41,7 +41,7 @@ type Node struct { var nodeID int64 -func NewNode(machineTemplate *MachineTemplate, topology *Topology, daemonResources v1.ResourceList, instanceTypes []*cloudprovider.InstanceType) *Node { +func NewMachine(machineTemplate *MachineTemplate, topology *Topology, daemonResources v1.ResourceList, instanceTypes []*cloudprovider.InstanceType) *Machine { // Copy the template, and add hostname hostname := fmt.Sprintf("hostname-placeholder-%04d", atomic.AddInt64(&nodeID, 1)) topology.Register(v1.LabelHostname, hostname) @@ -52,14 +52,14 @@ func NewNode(machineTemplate *MachineTemplate, topology *Topology, daemonResourc template.InstanceTypeOptions = instanceTypes template.Requests = daemonResources - return &Node{ + return &Machine{ MachineTemplate: template, hostPortUsage: scheduling.NewHostPortUsage(), topology: topology, } } -func (m *Node) Add(ctx context.Context, pod *v1.Pod) error { +func (m *Machine) Add(ctx context.Context, pod *v1.Pod) error { // Check Taints if err := m.Taints.Tolerates(pod); err != nil { return err @@ -70,52 +70,52 @@ func (m *Node) Add(ctx context.Context, pod *v1.Pod) error { return err } - nodeRequirements := scheduling.NewRequirements(m.Requirements.Values()...) + machineRequirements := scheduling.NewRequirements(m.Requirements.Values()...) podRequirements := scheduling.NewPodRequirements(pod) - // Check Node Affinity Requirements - if err := nodeRequirements.Compatible(podRequirements); err != nil { + // Check Machine Affinity Requirements + if err := machineRequirements.Compatible(podRequirements); err != nil { return fmt.Errorf("incompatible requirements, %w", err) } - nodeRequirements.Add(podRequirements.Values()...) + machineRequirements.Add(podRequirements.Values()...) 
// Check Topology Requirements - topologyRequirements, err := m.topology.AddRequirements(podRequirements, nodeRequirements, pod) + topologyRequirements, err := m.topology.AddRequirements(podRequirements, machineRequirements, pod) if err != nil { return err } - if err = nodeRequirements.Compatible(topologyRequirements); err != nil { + if err = machineRequirements.Compatible(topologyRequirements); err != nil { return err } - nodeRequirements.Add(topologyRequirements.Values()...) + machineRequirements.Add(topologyRequirements.Values()...) // Check instance type combinations requests := resources.Merge(m.Requests, resources.RequestsForPods(pod)) - instanceTypes := filterInstanceTypesByRequirements(m.InstanceTypeOptions, nodeRequirements, requests) + instanceTypes := filterInstanceTypesByRequirements(m.InstanceTypeOptions, machineRequirements, requests) if len(instanceTypes) == 0 { - return fmt.Errorf("no instance type satisfied resources %s and requirements %s", resources.String(resources.RequestsForPods(pod)), nodeRequirements) + return fmt.Errorf("no instance type satisfied resources %s and requirements %s", resources.String(resources.RequestsForPods(pod)), machineRequirements) } // Update node m.Pods = append(m.Pods, pod) m.InstanceTypeOptions = instanceTypes m.Requests = requests - m.Requirements = nodeRequirements - m.topology.Record(pod, nodeRequirements) + m.Requirements = machineRequirements + m.topology.Record(pod, machineRequirements) m.hostPortUsage.Add(ctx, pod) return nil } // FinalizeScheduling is called once all scheduling has completed and allows the node to perform any cleanup // necessary before its requirements are used for instance launching -func (m *Node) FinalizeScheduling() { +func (m *Machine) FinalizeScheduling() { // We need nodes to have hostnames for topology purposes, but we don't want to pass that node name on to consumers // of the node as it will be displayed in error messages delete(m.Requirements, v1.LabelHostname) } -func (m *Node) String() string { - return fmt.Sprintf("node with %d pods requesting %s from types %s", len(m.Pods), resources.String(m.Requests), +func (m *Machine) String() string { + return fmt.Sprintf("machine with %d pods requesting %s from types %s", len(m.Pods), resources.String(m.Requests), InstanceTypeList(m.InstanceTypeOptions)) } diff --git a/pkg/controllers/provisioning/scheduling/scheduler.go b/pkg/controllers/provisioning/scheduling/scheduler.go index da8c5090d8..395ea0fa92 100644 --- a/pkg/controllers/provisioning/scheduling/scheduler.go +++ b/pkg/controllers/provisioning/scheduling/scheduler.go @@ -79,7 +79,7 @@ func NewScheduler(ctx context.Context, kubeClient client.Client, machines []*Mac type Scheduler struct { ctx context.Context - newNodes []*Node + newNodes []*Machine existingNodes []*ExistingNode machineTemplates []*MachineTemplate remainingResources map[string]v1.ResourceList // provisioner name -> remaining resources for that provisioner @@ -93,7 +93,7 @@ type Scheduler struct { kubeClient client.Client } -func (s *Scheduler) Solve(ctx context.Context, pods []*v1.Pod) ([]*Node, []*ExistingNode, error) { +func (s *Scheduler) Solve(ctx context.Context, pods []*v1.Pod) ([]*Machine, []*ExistingNode, error) { // We loop trying to schedule unschedulable pods as long as we are making progress. This solves a few // issues including pods with affinity to another pod in the batch. 
We could topo-sort to solve this, but it wouldn't // solve the problem of scheduling pods where a particular order is needed to prevent a max-skew violation. E.g. if we @@ -208,7 +208,7 @@ func (s *Scheduler) add(ctx context.Context, pod *v1.Pod) error { } } - node := NewNode(nodeTemplate, s.topology, s.daemonOverhead[nodeTemplate], instanceTypes) + node := NewMachine(nodeTemplate, s.topology, s.daemonOverhead[nodeTemplate], instanceTypes) if err := node.Add(ctx, pod); err != nil { errs = multierr.Append(errs, fmt.Errorf("incompatible with provisioner %q, %w", nodeTemplate.ProvisionerName, err)) continue diff --git a/pkg/controllers/provisioning/scheduling/scheduling_benchmark_test.go b/pkg/controllers/provisioning/scheduling/scheduling_benchmark_test.go index 8bb5b2eabb..8b03b55322 100644 --- a/pkg/controllers/provisioning/scheduling/scheduling_benchmark_test.go +++ b/pkg/controllers/provisioning/scheduling/scheduling_benchmark_test.go @@ -29,6 +29,7 @@ import ( "github.com/samber/lo" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/client-go/tools/record" "k8s.io/utils/clock" "github.com/aws/karpenter-core/pkg/apis/settings" @@ -36,6 +37,7 @@ import ( "github.com/aws/karpenter-core/pkg/cloudprovider/fake" "github.com/aws/karpenter-core/pkg/controllers/provisioning/scheduling" "github.com/aws/karpenter-core/pkg/controllers/state" + "github.com/aws/karpenter-core/pkg/events" "github.com/aws/karpenter-core/pkg/test" "go.uber.org/zap" @@ -122,7 +124,7 @@ func benchmarkScheduler(b *testing.B, instanceCount, podCount int) { scheduler := scheduling.NewScheduler(ctx, nil, []*scheduling.MachineTemplate{scheduling.NewMachineTemplate(provisioner)}, nil, state.NewCluster(&clock.RealClock{}, nil, cloudProv), nil, &scheduling.Topology{}, map[string][]*cloudprovider.InstanceType{provisioner.Name: instanceTypes}, nil, - test.NewEventRecorder(), + events.NewRecorder(&record.FakeRecorder{}), scheduling.SchedulerOptions{}) pods := makeDiversePods(podCount) diff --git a/pkg/controllers/provisioning/scheduling/suite_test.go b/pkg/controllers/provisioning/scheduling/suite_test.go index 6535e60833..194f2c108d 100644 --- a/pkg/controllers/provisioning/scheduling/suite_test.go +++ b/pkg/controllers/provisioning/scheduling/suite_test.go @@ -22,13 +22,15 @@ import ( "testing" "time" - clock "k8s.io/utils/clock/testing" - - v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/record" + clock "k8s.io/utils/clock/testing" + "knative.dev/pkg/ptr" + + v1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/aws/karpenter-core/pkg/apis" @@ -36,20 +38,19 @@ import ( "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/cloudprovider" "github.com/aws/karpenter-core/pkg/cloudprovider/fake" + "github.com/aws/karpenter-core/pkg/controllers/provisioning" "github.com/aws/karpenter-core/pkg/controllers/provisioning/scheduling" "github.com/aws/karpenter-core/pkg/controllers/state" "github.com/aws/karpenter-core/pkg/controllers/state/informer" + "github.com/aws/karpenter-core/pkg/events" "github.com/aws/karpenter-core/pkg/operator/controller" "github.com/aws/karpenter-core/pkg/operator/scheme" pscheduling "github.com/aws/karpenter-core/pkg/scheduling" - - 
"github.com/aws/karpenter-core/pkg/controllers/provisioning" "github.com/aws/karpenter-core/pkg/test" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . "knative.dev/pkg/logging/testing" - "knative.dev/pkg/ptr" . "github.com/aws/karpenter-core/pkg/test/expectations" ) @@ -64,7 +65,6 @@ var cloudProv *fake.CloudProvider var cluster *state.Cluster var nodeStateController controller.Controller var podStateController controller.Controller -var recorder *test.EventRecorder func TestScheduling(t *testing.T) { ctx = TestContextWithLogger(t) @@ -83,9 +83,8 @@ var _ = BeforeSuite(func() { cluster = state.NewCluster(fakeClock, env.Client, cloudProv) nodeStateController = informer.NewNodeController(env.Client, cluster) podStateController = informer.NewPodController(env.Client, cluster) - recorder = test.NewEventRecorder() - prov = provisioning.NewProvisioner(ctx, env.Client, env.KubernetesInterface.CoreV1(), recorder, cloudProv, cluster) - provisioningController = provisioning.NewController(env.Client, prov, recorder) + prov = provisioning.NewProvisioner(ctx, env.Client, env.KubernetesInterface.CoreV1(), events.NewRecorder(&record.FakeRecorder{}), cloudProv, cluster) + provisioningController = provisioning.NewController(env.Client, prov, events.NewRecorder(&record.FakeRecorder{})) }) var _ = AfterSuite(func() { @@ -102,7 +101,6 @@ var _ = BeforeEach(func() { newCP := fake.CloudProvider{} cloudProv.InstanceTypes, _ = newCP.GetInstanceTypes(context.Background(), nil) cloudProv.CreateCalls = nil - recorder.Reset() }) var _ = AfterEach(func() { @@ -115,44 +113,49 @@ var _ = Describe("Custom Constraints", func() { It("should schedule unconstrained pods that don't have matching node selectors", func() { provisioner.Spec.Labels = map[string]string{"test-key": "test-value"} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod())[0] + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue("test-key", "test-value")) }) It("should not schedule pods that have conflicting node selectors", func() { provisioner.Spec.Labels = map[string]string{"test-key": "test-value"} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeSelector: map[string]string{"test-key": "different-value"}}, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should not schedule pods that have node selectors with undefined key", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeSelector: map[string]string{"test-key": "test-value"}}, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should schedule pods that have matching requirements", func() { provisioner.Spec.Labels = map[string]string{"test-key": "test-value"} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( 
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value", "another-value"}}, }}, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue("test-key", "test-value")) }) It("should not schedule pods that have conflicting requirements", func() { provisioner.Spec.Labels = map[string]string{"test-key": "test-value"} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"another-value"}}, }}, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) }) @@ -161,7 +164,8 @@ var _ = Describe("Custom Constraints", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}}} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod())[0] + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-2")) }) @@ -169,44 +173,49 @@ var _ = Describe("Custom Constraints", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2"}}} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-2"}}, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-2")) }) It("should not schedule nodes with a hostname selector", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeSelector: map[string]string{v1.LabelHostname: "red-node"}}, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should not schedule the pod if nodeselector unknown", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}}} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeSelector: map[string]string{v1.LabelTopologyZone: "unknown"}}, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should not schedule if node selector outside of provisioner constraints", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: 
[]string{"test-zone-1"}}} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-2"}}, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should schedule compatible requirements with Operator=In", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-3"}}, }}, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3")) }) @@ -215,7 +224,8 @@ var _ = Describe("Custom Constraints", func() { Key: fake.IntegerInstanceLabelKey, Operator: v1.NodeSelectorOpGt, Values: []string{"8"}, }} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod())[0] + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue(fake.IntegerInstanceLabelKey, "16")) }) @@ -224,92 +234,100 @@ var _ = Describe("Custom Constraints", func() { Key: fake.IntegerInstanceLabelKey, Operator: v1.NodeSelectorOpLt, Values: []string{"8"}, }} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod())[0] + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue(fake.IntegerInstanceLabelKey, "2")) }) It("should not schedule incompatible preferences and requirements with Operator=In", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"unknown"}}, }}, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should schedule compatible requirements with Operator=NotIn", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-zone-1", "test-zone-2", "unknown"}}, }}, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3")) }) It("should not schedule incompatible preferences and requirements with Operator=NotIn", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, 
provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ NodeRequirements: []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3", "unknown"}}, }}, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should schedule compatible preferences and requirements with Operator=In", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ NodeRequirements: []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3", "unknown"}}}, NodePreferences: []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2", "unknown"}}}, }, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-2")) }) It("should schedule incompatible preferences and requirements with Operator=In", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ NodeRequirements: []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3", "unknown"}}}, NodePreferences: []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"unknown"}}}, }, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectScheduled(ctx, env.Client, pod) }) It("should schedule compatible preferences and requirements with Operator=NotIn", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ NodeRequirements: []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3", "unknown"}}}, NodePreferences: []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-zone-1", "test-zone-3"}}}, }, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-2")) }) It("should schedule incompatible preferences and requirements with Operator=NotIn", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ NodeRequirements: []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3", "unknown"}}}, NodePreferences: []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3"}}}, }, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) 
ExpectScheduled(ctx, env.Client, pod) }) It("should schedule compatible node selectors, preferences and requirements", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-3"}, NodeRequirements: []v1.NodeSelectorRequirement{ @@ -317,13 +335,14 @@ var _ = Describe("Custom Constraints", func() { NodePreferences: []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3"}}}, }, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3")) }) It("should combine multidimensional node selectors, preferences and requirements", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ NodeSelector: map[string]string{ v1.LabelTopologyZone: "test-zone-3", @@ -338,7 +357,8 @@ var _ = Describe("Custom Constraints", func() { {Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpNotIn, Values: []string{"unknown"}}, }, }, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3")) Expect(node.Labels).To(HaveKeyWithValue(v1.LabelInstanceTypeStable, "arm-instance-type")) @@ -348,20 +368,22 @@ var _ = Describe("Custom Constraints", func() { It("should not schedule pods that have node selectors with restricted labels", func() { ExpectApplied(ctx, env.Client, provisioner) for label := range v1alpha5.RestrictedLabels { - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: label, Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}, - }}))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) } }) It("should not schedule pods that have node selectors with restricted domains", func() { ExpectApplied(ctx, env.Client, provisioner) for domain := range v1alpha5.RestrictedLabelDomains { - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: domain + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}, - }}))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) } }) @@ -373,7 +395,8 @@ var _ = Describe("Custom Constraints", func() { provisioner.Spec.Requirements = requirements ExpectApplied(ctx, env.Client, provisioner) for domain := range v1alpha5.LabelDomainExceptions { - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod())[0] + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue(domain+"/test", "test-value")) } @@ -392,7 +415,8 @@ var _ = 
Describe("Custom Constraints", func() { test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1alpha5.LabelCapacityType: "spot"}}), } ExpectApplied(ctx, env.Client, provisioner) - for _, pod := range ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, schedulable...) { + ExpectProvisioned(ctx, env.Client, cluster, prov, schedulable...) + for _, pod := range schedulable { ExpectScheduled(ctx, env.Client, pod) } }) @@ -400,35 +424,39 @@ var _ = Describe("Custom Constraints", func() { Context("Scheduling Logic", func() { It("should not schedule pods that have node selectors with In operator and undefined key", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}, - }}))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should schedule pods that have node selectors with NotIn operator and undefined key", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-value"}}, - }}))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).ToNot(HaveKeyWithValue("test-key", "test-value")) }) It("should not schedule pods that have node selectors with Exists operator and undefined key", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpExists}, - }}))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should schedule pods that with DoesNotExists operator and undefined key", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpDoesNotExist}, - }}))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).ToNot(HaveKey("test-key")) }) @@ -436,7 +464,8 @@ var _ = Describe("Custom Constraints", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod())[0] + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue("test-key", "test-value")) }) @@ -444,10 +473,11 @@ var _ = Describe("Custom Constraints", func() { provisioner.Spec.Requirements = 
[]v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}, - }}))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue("test-key", "test-value")) }) @@ -455,52 +485,57 @@ var _ = Describe("Custom Constraints", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-value"}}, - }}))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should schedule the pod with Exists operator and defined key", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpExists}, }}, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectScheduled(ctx, env.Client, pod) }) It("should not schedule the pod with DoesNotExists operator and defined key", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpDoesNotExist}, }}, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should not schedule pods that have node selectors with different value and In operator", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"another-value"}}, - }}))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should schedule pods that have node selectors with different value and NotIn operator", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: 
[]string{"test-value"}}} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpNotIn, Values: []string{"another-value"}}, - }}))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue("test-key", "test-value")) }) @@ -508,13 +543,16 @@ var _ = Describe("Custom Constraints", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value", "another-value"}}} ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( - test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ - {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}, - }}), + pods := []*v1.Pod{ + test.UnschedulablePod( + test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ + {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}, + }}), test.UnschedulablePod(test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpNotIn, Values: []string{"another-value"}}, - }})) + }}), + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) node1 := ExpectScheduled(ctx, env.Client, pods[0]) node2 := ExpectScheduled(ctx, env.Client, pods[1]) Expect(node1.Labels).To(HaveKeyWithValue("test-key", "test-value")) @@ -525,13 +563,16 @@ var _ = Describe("Custom Constraints", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value", "another-value"}}} ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( - test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ - {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}, - }}), + pods := []*v1.Pod{ + test.UnschedulablePod( + test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ + {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}, + }}), test.UnschedulablePod(test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ {Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"another-value"}}, - }})) + }}), + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) 
node1 := ExpectScheduled(ctx, env.Client, pods[0]) node2 := ExpectScheduled(ctx, env.Client, pods[1]) Expect(node1.Labels).To(HaveKeyWithValue("test-key", "test-value")) @@ -540,13 +581,13 @@ var _ = Describe("Custom Constraints", func() { }) It("Exists operator should not overwrite the existing value", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ NodeRequirements: []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"non-existent-zone"}}, {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpExists}, - }}, - ))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) }) @@ -567,7 +608,7 @@ var _ = Describe("Preferential Fallback", func() { }}}} // Don't relax ExpectApplied(ctx, env.Client, provisioner) - pod = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod)[0] + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should relax multiple terms", func() { @@ -588,7 +629,7 @@ var _ = Describe("Preferential Fallback", func() { }}}} // Success ExpectApplied(ctx, env.Client, provisioner) - pod = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod)[0] + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-1")) }) @@ -610,7 +651,7 @@ var _ = Describe("Preferential Fallback", func() { }}} // Success ExpectApplied(ctx, env.Client, provisioner) - pod = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod)[0] + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectScheduled(ctx, env.Client, pod) }) It("should relax to use lighter weights", func() { @@ -636,7 +677,7 @@ var _ = Describe("Preferential Fallback", func() { }}} // Success ExpectApplied(ctx, env.Client, provisioner) - pod = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod)[0] + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-2")) }) @@ -657,7 +698,7 @@ var _ = Describe("Preferential Fallback", func() { }} // Success ExpectApplied(ctx, env.Client, provisioner) - pod = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod)[0] + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3")) }) @@ -667,7 +708,7 @@ var _ = Describe("Preferential Fallback", func() { {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"invalid"}}, }}) ExpectApplied(ctx, env.Client, provisioner) - pod = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod)[0] + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectScheduled(ctx, env.Client, pod) }) }) @@ -676,14 +717,14 @@ var _ = Describe("Preferential Fallback", func() { var _ = Describe("Instance Type Compatibility", func() { It("should not schedule if requesting more resources than any instance type has", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := 
ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, - test.UnschedulablePod(test.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: map[v1.ResourceName]resource.Quantity{ - v1.ResourceCPU: resource.MustParse("512"), - }}, - })) - ExpectNotScheduled(ctx, env.Client, pod[0]) + pod := test.UnschedulablePod(test.PodOptions{ + ResourceRequirements: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("512"), + }}, + }) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + ExpectNotScheduled(ctx, env.Client, pod) }) It("should launch pods with different archs on different instances", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{{ @@ -693,13 +734,16 @@ var _ = Describe("Instance Type Compatibility", func() { }} nodeNames := sets.NewString() ExpectApplied(ctx, env.Client, provisioner) - for _, pod := range ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + pods := []*v1.Pod{ test.UnschedulablePod(test.PodOptions{ NodeSelector: map[string]string{v1.LabelArchStable: v1alpha5.ArchitectureAmd64}, }), test.UnschedulablePod(test.PodOptions{ NodeSelector: map[string]string{v1.LabelArchStable: v1alpha5.ArchitectureArm64}, - })) { + }), + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) + for _, pod := range pods { node := ExpectScheduled(ctx, env.Client, pod) nodeNames.Insert(node.Name) } @@ -712,17 +756,17 @@ var _ = Describe("Instance Type Compatibility", func() { Values: []string{v1alpha5.ArchitectureAmd64}, }} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, - test.UnschedulablePod(test.PodOptions{ - NodeRequirements: []v1.NodeSelectorRequirement{ - { - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpIn, - Values: []string{"arm-instance-type"}, - }, - }})) + pod := test.UnschedulablePod(test.PodOptions{ + NodeRequirements: []v1.NodeSelectorRequirement{ + { + Key: v1.LabelInstanceTypeStable, + Operator: v1.NodeSelectorOpIn, + Values: []string{"arm-instance-type"}, + }, + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) // arm instance type conflicts with the provisioner limitation of AMD only - ExpectNotScheduled(ctx, env.Client, pod[0]) + ExpectNotScheduled(ctx, env.Client, pod) }) It("should exclude instance types that are not supported by the pod constraints (node affinity/operating system)", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{{ @@ -731,18 +775,18 @@ var _ = Describe("Instance Type Compatibility", func() { Values: []string{v1alpha5.ArchitectureAmd64}, }} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, - test.UnschedulablePod(test.PodOptions{ - NodeRequirements: []v1.NodeSelectorRequirement{ - { - Key: v1.LabelOSStable, - Operator: v1.NodeSelectorOpIn, - Values: []string{"ios"}, - }, - }})) + pod := test.UnschedulablePod(test.PodOptions{ + NodeRequirements: []v1.NodeSelectorRequirement{ + { + Key: v1.LabelOSStable, + Operator: v1.NodeSelectorOpIn, + Values: []string{"ios"}, + }, + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) // there's an instance with an OS of ios, but it has an arm processor so the provider requirements will // exclude it - ExpectNotScheduled(ctx, env.Client, pod[0]) + ExpectNotScheduled(ctx, env.Client, pod) }) It("should exclude instance 
types that are not supported by the provider constraints (arch)", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{{ @@ -751,11 +795,11 @@ var _ = Describe("Instance Type Compatibility", func() { Values: []string{v1alpha5.ArchitectureAmd64}, }} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, - test.UnschedulablePod(test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ - Limits: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("14")}}})) + pod := test.UnschedulablePod(test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ + Limits: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("14")}}}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) // only the ARM instance has enough CPU, but it's not allowed per the provisioner - ExpectNotScheduled(ctx, env.Client, pod[0]) + ExpectNotScheduled(ctx, env.Client, pod) }) It("should launch pods with different operating systems on different instances", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{{ @@ -765,13 +809,16 @@ var _ = Describe("Instance Type Compatibility", func() { }} nodeNames := sets.NewString() ExpectApplied(ctx, env.Client, provisioner) - for _, pod := range ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + pods := []*v1.Pod{ test.UnschedulablePod(test.PodOptions{ NodeSelector: map[string]string{v1.LabelOSStable: string(v1.Linux)}, }), test.UnschedulablePod(test.PodOptions{ NodeSelector: map[string]string{v1.LabelOSStable: string(v1.Windows)}, - })) { + }), + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) + for _, pod := range pods { node := ExpectScheduled(ctx, env.Client, pod) nodeNames.Insert(node.Name) } @@ -785,13 +832,16 @@ var _ = Describe("Instance Type Compatibility", func() { }} nodeNames := sets.NewString() ExpectApplied(ctx, env.Client, provisioner) - for _, pod := range ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + pods := []*v1.Pod{ test.UnschedulablePod(test.PodOptions{ NodeSelector: map[string]string{v1.LabelInstanceType: "small-instance-type"}, }), test.UnschedulablePod(test.PodOptions{ NodeSelector: map[string]string{v1.LabelInstanceTypeStable: "default-instance-type"}, - })) { + }), + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) + for _, pod := range pods { node := ExpectScheduled(ctx, env.Client, pod) nodeNames.Insert(node.Name) } @@ -805,13 +855,16 @@ var _ = Describe("Instance Type Compatibility", func() { }} nodeNames := sets.NewString() ExpectApplied(ctx, env.Client, provisioner) - for _, pod := range ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + pods := []*v1.Pod{ test.UnschedulablePod(test.PodOptions{ NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-1"}, }), test.UnschedulablePod(test.PodOptions{ NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-2"}, - })) { + }), + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) 
+ for _, pod := range pods { node := ExpectScheduled(ctx, env.Client, pod) nodeNames.Insert(node.Name) } @@ -826,7 +879,7 @@ var _ = Describe("Instance Type Compatibility", func() { nodeNames := sets.NewString() ExpectApplied(ctx, env.Client, provisioner) - for _, pod := range ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + pods := []*v1.Pod{ test.UnschedulablePod(test.PodOptions{ ResourceRequirements: v1.ResourceRequirements{ Limits: v1.ResourceList{fakeGPU1: resource.MustParse("1")}, @@ -837,7 +890,10 @@ var _ = Describe("Instance Type Compatibility", func() { ResourceRequirements: v1.ResourceRequirements{ Limits: v1.ResourceList{fakeGPU2: resource.MustParse("1")}, }, - })) { + }), + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) + for _, pod := range pods { node := ExpectScheduled(ctx, env.Client, pod) nodeNames.Insert(node.Name) } @@ -851,24 +907,25 @@ var _ = Describe("Instance Type Compatibility", func() { cloudProv.InstanceTypes[1].Capacity[fakeGPU2] = resource.MustParse("25") ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, - test.UnschedulablePod(test.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Limits: v1.ResourceList{ - fakeGPU1: resource.MustParse("1"), - fakeGPU2: resource.MustParse("1")}, - }, - })) - ExpectNotScheduled(ctx, env.Client, pods[0]) + pod := test.UnschedulablePod(test.PodOptions{ + ResourceRequirements: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + fakeGPU1: resource.MustParse("1"), + fakeGPU2: resource.MustParse("1")}, + }, + }) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + ExpectNotScheduled(ctx, env.Client, pod) }) Context("Provider Specific Labels", func() { It("should filter instance types that match labels", func() { cloudProv.InstanceTypes = fake.InstanceTypes(5) ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + pods := []*v1.Pod{ test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{fake.LabelInstanceSize: "large"}}), test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{fake.LabelInstanceSize: "small"}}), - ) + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) node := ExpectScheduled(ctx, env.Client, pods[0]) Expect(node.Labels).To(HaveKeyWithValue(v1.LabelInstanceTypeStable, "fake-it-4")) node = ExpectScheduled(ctx, env.Client, pods[1]) @@ -877,7 +934,7 @@ var _ = Describe("Instance Type Compatibility", func() { It("should not schedule with incompatible labels", func() { cloudProv.InstanceTypes = fake.InstanceTypes(5) ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + pods := []*v1.Pod{ test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{ fake.LabelInstanceSize: "large", v1.LabelInstanceTypeStable: cloudProv.InstanceTypes[0].Name, @@ -886,215 +943,59 @@ var _ = Describe("Instance Type Compatibility", func() { fake.LabelInstanceSize: "small", v1.LabelInstanceTypeStable: cloudProv.InstanceTypes[4].Name, }}), - ) + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) 
ExpectNotScheduled(ctx, env.Client, pods[0]) ExpectNotScheduled(ctx, env.Client, pods[1]) }) It("should schedule optional labels", func() { cloudProv.InstanceTypes = fake.InstanceTypes(5) ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, - test.UnschedulablePod(test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ - // Only some instance types have this key - {Key: fake.ExoticInstanceLabelKey, Operator: v1.NodeSelectorOpExists}, - }}), - ) - node := ExpectScheduled(ctx, env.Client, pods[0]) + pod := test.UnschedulablePod(test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ + // Only some instance types have this key + {Key: fake.ExoticInstanceLabelKey, Operator: v1.NodeSelectorOpExists}, + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKey(fake.ExoticInstanceLabelKey)) Expect(node.Labels).To(HaveKeyWithValue(v1.LabelInstanceTypeStable, cloudProv.InstanceTypes[4].Name)) }) It("should schedule without optional labels if disallowed", func() { cloudProv.InstanceTypes = fake.InstanceTypes(5) ExpectApplied(ctx, env.Client, test.Provisioner()) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, - test.UnschedulablePod(test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ - // Only some instance types have this key - {Key: fake.ExoticInstanceLabelKey, Operator: v1.NodeSelectorOpDoesNotExist}, - }}), - ) - node := ExpectScheduled(ctx, env.Client, pods[0]) + pod := test.UnschedulablePod(test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ + // Only some instance types have this key + {Key: fake.ExoticInstanceLabelKey, Operator: v1.NodeSelectorOpDoesNotExist}, + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).ToNot(HaveKey(fake.ExoticInstanceLabelKey)) }) }) }) -var _ = Describe("Networking constraints", func() { - Context("HostPort", func() { - It("shouldn't co-locate pods that use the same HostPort and protocol (default protocol)", func() { - port := v1.ContainerPort{ - Name: "test-port", - HostPort: 80, - ContainerPort: 1234, - } - pod1 := test.UnschedulablePod() - pod1.Spec.Containers[0].Ports = append(pod1.Spec.Containers[0].Ports, port) - pod2 := test.UnschedulablePod() - pod2.Spec.Containers[0].Ports = append(pod2.Spec.Containers[0].Ports, port) - - ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod1, pod2) - node1 := ExpectScheduled(ctx, env.Client, pod1) - node2 := ExpectScheduled(ctx, env.Client, pod2) - Expect(node1.Name).ToNot(Equal(node2.Name)) - }) - It("shouldn't co-locate pods that use the same HostPort and protocol (specific protocol)", func() { - port := v1.ContainerPort{ - Name: "test-port", - HostPort: 80, - ContainerPort: 1234, - Protocol: "UDP", - } - pod1 := test.UnschedulablePod() - pod1.Spec.Containers[0].Ports = append(pod1.Spec.Containers[0].Ports, port) - pod2 := test.UnschedulablePod() - pod2.Spec.Containers[0].Ports = append(pod2.Spec.Containers[0].Ports, port) - - ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod1, pod2) - node1 := ExpectScheduled(ctx, env.Client, pod1) - node2 := ExpectScheduled(ctx, env.Client, pod2) - Expect(node1.Name).ToNot(Equal(node2.Name)) - }) - 
It("shouldn't co-locate pods that use the same HostPort and IP (default (_))", func() { - port := v1.ContainerPort{ - Name: "test-port", - HostPort: 80, - ContainerPort: 1234, - } - pod1 := test.UnschedulablePod() - pod1.Spec.Containers[0].Ports = append(pod1.Spec.Containers[0].Ports, port) - port.HostIP = "1.2.3.4" // Defaulted "0.0.0.0" on pod1 should conflict - pod2 := test.UnschedulablePod() - pod2.Spec.Containers[0].Ports = append(pod2.Spec.Containers[0].Ports, port) - - ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod1, pod2) - node1 := ExpectScheduled(ctx, env.Client, pod1) - node2 := ExpectScheduled(ctx, env.Client, pod2) - Expect(node1.Name).ToNot(Equal(node2.Name)) - }) - It("shouldn't co-locate pods that use the same HostPort but a different IP, where one ip is 0.0.0.0", func() { - port := v1.ContainerPort{ - Name: "test-port", - HostPort: 80, - ContainerPort: 1234, - Protocol: "TCP", - HostIP: "1.2.3.4", - } - pod1 := test.UnschedulablePod() - pod1.Spec.Containers[0].Ports = append(pod1.Spec.Containers[0].Ports, port) - pod2 := test.UnschedulablePod() - port.HostIP = "0.0.0.0" // all interfaces - pod2.Spec.Containers[0].Ports = append(pod2.Spec.Containers[0].Ports, port) - - ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod1, pod2) - node1 := ExpectScheduled(ctx, env.Client, pod1) - node2 := ExpectScheduled(ctx, env.Client, pod2) - Expect(node1.Name).ToNot(Equal(node2.Name)) - }) - It("shouldn't co-locate pods that use the same HostPort but a different IP, where one ip is 0.0.0.0 (existingNodes)", func() { - port := v1.ContainerPort{ - Name: "test-port", - HostPort: 80, - ContainerPort: 1234, - Protocol: "TCP", - HostIP: "1.2.3.4", - } - pod1 := test.UnschedulablePod() - pod1.Spec.Containers[0].Ports = append(pod1.Spec.Containers[0].Ports, port) - pod2 := test.UnschedulablePod() - port.HostIP = "0.0.0.0" // all interfaces - pod2.Spec.Containers[0].Ports = append(pod2.Spec.Containers[0].Ports, port) - - ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod1) - node1 := ExpectScheduled(ctx, env.Client, pod1) - ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1)) - - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod2) - node2 := ExpectScheduled(ctx, env.Client, pod2) - Expect(node1.Name).ToNot(Equal(node2.Name)) - }) - It("should co-locate pods that use the same HostPort but a different protocol", func() { - port := v1.ContainerPort{ - Name: "test-port", - HostPort: 80, - ContainerPort: 1234, - Protocol: "TCP", - } - pod1 := test.UnschedulablePod() - pod1.Spec.Containers[0].Ports = append(pod1.Spec.Containers[0].Ports, port) - pod2 := test.UnschedulablePod() - port.Protocol = "UDP" - pod2.Spec.Containers[0].Ports = append(pod2.Spec.Containers[0].Ports, port) - - ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod1, pod2) - node1 := ExpectScheduled(ctx, env.Client, pod1) - node2 := ExpectScheduled(ctx, env.Client, pod2) - Expect(node1.Name).To(Equal(node2.Name)) - }) - It("should co-locate pods that use the same HostPort but a different IP", func() { - port := v1.ContainerPort{ - Name: "test-port", - HostPort: 80, - ContainerPort: 1234, - Protocol: "TCP", - HostIP: "1.2.3.4", - } - pod1 
:= test.UnschedulablePod() - pod1.Spec.Containers[0].Ports = append(pod1.Spec.Containers[0].Ports, port) - pod2 := test.UnschedulablePod() - port.HostIP = "4.5.6.7" - pod2.Spec.Containers[0].Ports = append(pod2.Spec.Containers[0].Ports, port) - - ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod1, pod2) - node1 := ExpectScheduled(ctx, env.Client, pod1) - node2 := ExpectScheduled(ctx, env.Client, pod2) - Expect(node1.Name).To(Equal(node2.Name)) - }) - It("should co-locate pods that don't use HostPort", func() { - port := v1.ContainerPort{ - Name: "test-port", - ContainerPort: 1234, - Protocol: "TCP", - } - pod1 := test.UnschedulablePod() - pod1.Spec.Containers[0].Ports = append(pod1.Spec.Containers[0].Ports, port) - pod2 := test.UnschedulablePod() - pod2.Spec.Containers[0].Ports = append(pod2.Spec.Containers[0].Ports, port) - - ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod1, pod2) - node1 := ExpectScheduled(ctx, env.Client, pod1) - node2 := ExpectScheduled(ctx, env.Client, pod2) - Expect(node1.Name).To(Equal(node2.Name)) - }) - }) -}) - var _ = Describe("Binpacking", func() { It("should schedule a small pod on the smallest instance", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ v1.ResourceMemory: resource.MustParse("100M"), }, - }}))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("small-instance-type")) }) It("should schedule a small pod on the smallest possible instance type", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ v1.ResourceMemory: resource.MustParse("2000M"), }, - }}))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("small-instance-type")) }) @@ -1106,8 +1007,9 @@ var _ = Describe("Binpacking", func() { v1.ResourceMemory: resource.MustParse("10M"), }, }} + pods := test.Pods(5, opts) ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.Pods(5, opts)...) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) nodeNames := sets.NewString() for _, p := range pods { node := ExpectScheduled(ctx, env.Client, p) @@ -1126,7 +1028,8 @@ var _ = Describe("Binpacking", func() { }, }} ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.Pods(40, opts)...) + pods := test.Pods(40, opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) nodeNames := sets.NewString() for _, p := range pods { node := ExpectScheduled(ctx, env.Client, p) @@ -1158,9 +1061,9 @@ var _ = Describe("Binpacking", func() { // should only end up with 20 newNodes total. 
provPods := append(test.Pods(40, largeOpts), test.Pods(20, smallOpts)...) ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, provPods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, provPods...) nodeNames := sets.NewString() - for _, p := range pods { + for _, p := range provPods { node := ExpectScheduled(ctx, env.Client, p) nodeNames.Insert(node.Name) Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("default-instance-type")) @@ -1171,7 +1074,7 @@ var _ = Describe("Binpacking", func() { cloudProv.InstanceTypes = fake.InstanceTypes(5) var nodes []*v1.Node ExpectApplied(ctx, env.Client, provisioner) - for _, pod := range ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + pods := []*v1.Pod{ test.UnschedulablePod(test.PodOptions{ ResourceRequirements: v1.ResourceRequirements{ Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4.5")}, @@ -1181,7 +1084,10 @@ var _ = Describe("Binpacking", func() { ResourceRequirements: v1.ResourceRequirements{ Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}, }, - })) { + }), + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) + for _, pod := range pods { node := ExpectScheduled(ctx, env.Client, pod) nodes = append(nodes, node) } @@ -1192,24 +1098,25 @@ var _ = Describe("Binpacking", func() { }) It("should handle zero-quantity resource requests", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, - test.UnschedulablePod(test.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{"foo.com/weird-resources": resource.MustParse("0")}, - Limits: v1.ResourceList{"foo.com/weird-resources": resource.MustParse("0")}, - }, - })) + pod := test.UnschedulablePod(test.PodOptions{ + ResourceRequirements: v1.ResourceRequirements{ + Requests: v1.ResourceList{"foo.com/weird-resources": resource.MustParse("0")}, + Limits: v1.ResourceList{"foo.com/weird-resources": resource.MustParse("0")}, + }, + }) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) // requesting a resource of quantity zero of a type unsupported by any instance is fine - ExpectScheduled(ctx, env.Client, pod[0]) + ExpectScheduled(ctx, env.Client, pod) }) It("should not schedule pods that exceed every instance type's capacity", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ v1.ResourceMemory: resource.MustParse("2Ti"), }, - }}))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should create new newNodes when a node is at capacity due to pod limits per node", func() { @@ -1223,7 +1130,8 @@ var _ = Describe("Binpacking", func() { }, }} ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.Pods(25, opts)...) + pods := test.Pods(25, opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) 
nodeNames := sets.NewString() // all of the test instance types support 5 pods each, so we use the 5 instances of the smallest one for our 25 pods for _, p := range pods { @@ -1235,7 +1143,7 @@ var _ = Describe("Binpacking", func() { }) It("should take into account initContainer resource requests when binpacking", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ v1.ResourceMemory: resource.MustParse("1Gi"), @@ -1249,13 +1157,14 @@ var _ = Describe("Binpacking", func() { v1.ResourceCPU: resource.MustParse("2"), }, }, - }))[0] + }) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("default-instance-type")) }) It("should not schedule pods when initContainer resource requests are greater than available instance types", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ v1.ResourceMemory: resource.MustParse("1Gi"), @@ -1269,7 +1178,8 @@ var _ = Describe("Binpacking", func() { v1.ResourceCPU: resource.MustParse("2"), }, }, - }))[0] + }) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should select for valid instance types, regardless of price", func() { @@ -1323,15 +1233,16 @@ var _ = Describe("Binpacking", func() { }), } ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ Limits: map[v1.ResourceName]resource.Quantity{ v1.ResourceCPU: resource.MustParse("1m"), v1.ResourceMemory: resource.MustParse("1Mi"), }, }}, - )) - node := ExpectScheduled(ctx, env.Client, pod[0]) + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) // large is the cheapest, so we should pick it, but the other two types are also valid options Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("large")) // all three options should be passed to the cloud provider @@ -1348,17 +1259,19 @@ var _ = Describe("In-Flight Nodes", func() { }, }} ExpectApplied(ctx, env.Client, provisioner) - initialPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(opts)) - node1 := ExpectScheduled(ctx, env.Client, initialPod[0]) + initialPod := test.UnschedulablePod(opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, initialPod) + node1 := ExpectScheduled(ctx, env.Client, initialPod) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1)) - secondPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(opts)) - node2 := ExpectScheduled(ctx, env.Client, secondPod[0]) + secondPod := test.UnschedulablePod(opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, secondPod) + node2 := ExpectScheduled(ctx, env.Client, secondPod) 
Expect(node1.Name).To(Equal(node2.Name)) }) It("should not launch a second node if there is an in-flight node that can support the pod (node selectors)", func() { ExpectApplied(ctx, env.Client, provisioner) - initialPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ + initialPod := test.UnschedulablePod(test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ Limits: map[v1.ResourceName]resource.Quantity{ v1.ResourceCPU: resource.MustParse("10m"), }, @@ -1367,12 +1280,13 @@ var _ = Describe("In-Flight Nodes", func() { Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}, - }}})) - node1 := ExpectScheduled(ctx, env.Client, initialPod[0]) + }}}) + ExpectProvisioned(ctx, env.Client, cluster, prov, initialPod) + node1 := ExpectScheduled(ctx, env.Client, initialPod) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1)) // the node gets created in test-zone-2 - secondPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ + secondPod := test.UnschedulablePod(test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ Limits: map[v1.ResourceName]resource.Quantity{ v1.ResourceCPU: resource.MustParse("10m"), }, @@ -1381,14 +1295,15 @@ var _ = Describe("In-Flight Nodes", func() { Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2"}, - }}})) + }}}) + ExpectProvisioned(ctx, env.Client, cluster, prov, secondPod) // test-zone-2 is in the intersection of their node selectors and the node has capacity, so we shouldn't create a new node - node2 := ExpectScheduled(ctx, env.Client, secondPod[0]) + node2 := ExpectScheduled(ctx, env.Client, secondPod) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1)) Expect(node1.Name).To(Equal(node2.Name)) // the node gets created in test-zone-2 - thirdPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ + thirdPod := test.UnschedulablePod(test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ Limits: map[v1.ResourceName]resource.Quantity{ v1.ResourceCPU: resource.MustParse("10m"), }, @@ -1397,9 +1312,10 @@ var _ = Describe("In-Flight Nodes", func() { Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-3"}, - }}})) + }}}) + ExpectProvisioned(ctx, env.Client, cluster, prov, thirdPod) // node is in test-zone-2, so this pod needs a new node - node3 := ExpectScheduled(ctx, env.Client, thirdPod[0]) + node3 := ExpectScheduled(ctx, env.Client, thirdPod) Expect(node1.Name).ToNot(Equal(node3.Name)) }) It("should launch a second node if a pod won't fit on the existingNodes node", func() { @@ -1409,14 +1325,16 @@ var _ = Describe("In-Flight Nodes", func() { v1.ResourceCPU: resource.MustParse("1001m"), }, }} - initialPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(opts)) - node1 := ExpectScheduled(ctx, env.Client, initialPod[0]) + initialPod := test.UnschedulablePod(opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, initialPod) + node1 := ExpectScheduled(ctx, env.Client, initialPod) ExpectReconcileSucceeded(ctx, nodeStateController, 
client.ObjectKeyFromObject(node1)) // the node will have 2000m CPU, so these two pods can't both fit on it opts.ResourceRequirements.Limits[v1.ResourceCPU] = resource.MustParse("1") - secondPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(opts)) - node2 := ExpectScheduled(ctx, env.Client, secondPod[0]) + secondPod := test.UnschedulablePod(opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, secondPod) + node2 := ExpectScheduled(ctx, env.Client, secondPod) Expect(node1.Name).ToNot(Equal(node2.Name)) }) It("should launch a second node if a pod isn't compatible with the existingNodes node (node selector)", func() { @@ -1426,13 +1344,14 @@ var _ = Describe("In-Flight Nodes", func() { v1.ResourceCPU: resource.MustParse("10m"), }, }} - initialPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(opts)) - node1 := ExpectScheduled(ctx, env.Client, initialPod[0]) + initialPod := test.UnschedulablePod(opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, initialPod) + node1 := ExpectScheduled(ctx, env.Client, initialPod) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1)) - secondPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, - test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelArchStable: "arm64"}})) - node2 := ExpectScheduled(ctx, env.Client, secondPod[0]) + secondPod := test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelArchStable: "arm64"}}) + ExpectProvisioned(ctx, env.Client, cluster, prov, secondPod) + node2 := ExpectScheduled(ctx, env.Client, secondPod) Expect(node1.Name).ToNot(Equal(node2.Name)) }) It("should launch a second node if an in-flight node is terminating", func() { @@ -1442,8 +1361,9 @@ var _ = Describe("In-Flight Nodes", func() { }, }} ExpectApplied(ctx, env.Client, provisioner) - initialPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(opts)) - node1 := ExpectScheduled(ctx, env.Client, initialPod[0]) + initialPod := test.UnschedulablePod(opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, initialPod) + node1 := ExpectScheduled(ctx, env.Client, initialPod) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1)) // delete the node @@ -1452,8 +1372,9 @@ var _ = Describe("In-Flight Nodes", func() { ExpectDeleted(ctx, env.Client, node1) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1)) - secondPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(opts)) - node2 := ExpectScheduled(ctx, env.Client, secondPod[0]) + secondPod := test.UnschedulablePod(opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, secondPod) + node2 := ExpectScheduled(ctx, env.Client, secondPod) Expect(node1.Name).ToNot(Equal(node2.Name)) }) Context("Topology", func() { @@ -1466,7 +1387,7 @@ var _ = Describe("In-Flight Nodes", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: 
topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), @@ -1482,7 +1403,7 @@ var _ = Describe("In-Flight Nodes", func() { } firstRoundNumNodes := len(nodeList.Items) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), @@ -1504,7 +1425,7 @@ var _ = Describe("In-Flight Nodes", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), @@ -1518,8 +1439,7 @@ var _ = Describe("In-Flight Nodes", func() { for _, node := range nodeList.Items { ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKey{Name: node.Name}) } - - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), @@ -1538,17 +1458,19 @@ var _ = Describe("In-Flight Nodes", func() { }, }} ExpectApplied(ctx, env.Client, provisioner) - initialPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(opts)) - node1 := ExpectScheduled(ctx, env.Client, initialPod[0]) + initialPod := test.UnschedulablePod(opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, initialPod) + node1 := ExpectScheduled(ctx, env.Client, initialPod) // delete the pod so that the node is empty - ExpectDeleted(ctx, env.Client, initialPod[0]) + ExpectDeleted(ctx, env.Client, initialPod) node1.Spec.Taints = nil ExpectApplied(ctx, env.Client, node1) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1)) - secondPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node2 := ExpectScheduled(ctx, env.Client, secondPod[0]) + secondPod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, secondPod) + node2 := ExpectScheduled(ctx, env.Client, secondPod) Expect(node1.Name).To(Equal(node2.Name)) }) It("should not assume pod will schedule to a tainted node", func() { @@ -1558,11 +1480,12 @@ var _ = Describe("In-Flight Nodes", func() { }, }} ExpectApplied(ctx, env.Client, provisioner) - initialPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(opts)) 
- node1 := ExpectScheduled(ctx, env.Client, initialPod[0]) + initialPod := test.UnschedulablePod(opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, initialPod) + node1 := ExpectScheduled(ctx, env.Client, initialPod) // delete the pod so that the node is empty - ExpectDeleted(ctx, env.Client, initialPod[0]) + ExpectDeleted(ctx, env.Client, initialPod) // and taint it node1.Spec.Taints = append(node1.Spec.Taints, v1.Taint{ Key: "foo.com/taint", @@ -1572,8 +1495,9 @@ var _ = Describe("In-Flight Nodes", func() { ExpectApplied(ctx, env.Client, node1) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1)) - secondPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node2 := ExpectScheduled(ctx, env.Client, secondPod[0]) + secondPod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, secondPod) + node2 := ExpectScheduled(ctx, env.Client, secondPod) Expect(node1.Name).ToNot(Equal(node2.Name)) }) It("should assume pod will schedule to a tainted node with a custom startup taint", func() { @@ -1588,11 +1512,12 @@ var _ = Describe("In-Flight Nodes", func() { Effect: v1.TaintEffectNoSchedule, }) ExpectApplied(ctx, env.Client, provisioner) - initialPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(opts)) - node1 := ExpectScheduled(ctx, env.Client, initialPod[0]) + initialPod := test.UnschedulablePod(opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, initialPod) + node1 := ExpectScheduled(ctx, env.Client, initialPod) // delete the pod so that the node is empty - ExpectDeleted(ctx, env.Client, initialPod[0]) + ExpectDeleted(ctx, env.Client, initialPod) // startup taint + node not ready taint = 2 Expect(node1.Spec.Taints).To(HaveLen(2)) Expect(node1.Spec.Taints).To(ContainElement(v1.Taint{ @@ -1603,19 +1528,21 @@ var _ = Describe("In-Flight Nodes", func() { ExpectApplied(ctx, env.Client, node1) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1)) - secondPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node2 := ExpectScheduled(ctx, env.Client, secondPod[0]) + secondPod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, secondPod) + node2 := ExpectScheduled(ctx, env.Client, secondPod) Expect(node1.Name).To(Equal(node2.Name)) }) It("should not assume pod will schedule to a node with startup taints after initialization", func() { startupTaint := v1.Taint{Key: "ignore-me", Value: "nothing-to-see-here", Effect: v1.TaintEffectNoSchedule} provisioner.Spec.StartupTaints = []v1.Taint{startupTaint} ExpectApplied(ctx, env.Client, provisioner) - initialPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node1 := ExpectScheduled(ctx, env.Client, initialPod[0]) + initialPod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, initialPod) + node1 := ExpectScheduled(ctx, env.Client, initialPod) // delete the pod so that the node is empty - ExpectDeleted(ctx, env.Client, initialPod[0]) + ExpectDeleted(ctx, env.Client, initialPod) // Mark it initialized which only occurs once the startup taint was removed and re-apply only the startup taint. // We also need to add resource capacity as after initialization we assume that kubelet has recorded them. 
@@ -1627,8 +1554,9 @@ var _ = Describe("In-Flight Nodes", func() { ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1)) // we should launch a new node since the startup taint is there, but was gone at some point - secondPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node2 := ExpectScheduled(ctx, env.Client, secondPod[0]) + secondPod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, secondPod) + node2 := ExpectScheduled(ctx, env.Client, secondPod) Expect(node1.Name).ToNot(Equal(node2.Name)) }) It("should consider a tainted NotReady node as in-flight even if initialized", func() { @@ -1637,8 +1565,9 @@ var _ = Describe("In-Flight Nodes", func() { }} ExpectApplied(ctx, env.Client, provisioner) - // Schedule to New Node - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(opts))[0] + // Schedule to New Machine + pod := test.UnschedulablePod(opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node1 := ExpectScheduled(ctx, env.Client, pod) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1)) // Mark Initialized @@ -1648,8 +1577,9 @@ var _ = Describe("In-Flight Nodes", func() { {Key: v1.TaintNodeUnreachable, Effect: v1.TaintEffectNoSchedule}, } ExpectApplied(ctx, env.Client, node1) - // Schedule to In Flight Node - pod = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(opts))[0] + // Schedule to In Flight Machine + pod = test.UnschedulablePod(opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node2 := ExpectScheduled(ctx, env.Client, pod) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node2)) @@ -1673,8 +1603,9 @@ var _ = Describe("In-Flight Nodes", func() { v1.ResourceCPU: resource.MustParse("8"), }, }} - initialPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(opts)) - node1 := ExpectScheduled(ctx, env.Client, initialPod[0]) + initialPod := test.UnschedulablePod(opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, initialPod) + node1 := ExpectScheduled(ctx, env.Client, initialPod) // create our daemonset pod and manually bind it to the node dsPod := test.UnschedulablePod(test.PodOptions{ @@ -1694,7 +1625,7 @@ var _ = Describe("In-Flight Nodes", func() { }) // delete the pod so that the node is empty - ExpectDeleted(ctx, env.Client, initialPod[0]) + ExpectDeleted(ctx, env.Client, initialPod) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1)) ExpectApplied(ctx, env.Client, provisioner, dsPod) @@ -1725,8 +1656,9 @@ var _ = Describe("In-Flight Nodes", func() { }} // this pod should schedule on the existingNodes node as the daemonset pod has already bound, meaning that the // remaining daemonset resources should be zero leaving 14.9 CPUs for the pod - secondPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(opts)) - node2 := ExpectScheduled(ctx, env.Client, secondPod[0]) + secondPod := test.UnschedulablePod(opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, secondPod) + node2 := ExpectScheduled(ctx, env.Client, secondPod) Expect(node1.Name).To(Equal(node2.Name)) }) It("should handle unexpected daemonset pods binding to the node", func() { @@ -1753,8 +1685,9 @@ var _ = Describe("In-Flight Nodes", 
func() { v1.ResourceCPU: resource.MustParse("8"), }, }} - initialPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(opts)) - node1 := ExpectScheduled(ctx, env.Client, initialPod[0]) + initialPod := test.UnschedulablePod(opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, initialPod) + node1 := ExpectScheduled(ctx, env.Client, initialPod) // this label appears on the node for some reason that Karpenter can't track node1.Labels["my-node-label"] = "value" ExpectApplied(ctx, env.Client, node1) @@ -1780,7 +1713,7 @@ var _ = Describe("In-Flight Nodes", func() { }) // delete the pod so that the node is empty - ExpectDeleted(ctx, env.Client, initialPod[0]) + ExpectDeleted(ctx, env.Client, initialPod) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1)) ExpectApplied(ctx, env.Client, provisioner, dsPod) @@ -1813,8 +1746,9 @@ var _ = Describe("In-Flight Nodes", func() { // we don't reintroduce a bug where more daemonsets scheduled than anticipated due to unexepected labels // appearing on the node which caused us to compute a negative amount of resources remaining for daemonsets // which in turn caused us to mis-calculate the amount of resources that were free on the node. - secondPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(opts)) - node2 := ExpectScheduled(ctx, env.Client, secondPod[0]) + secondPod := test.UnschedulablePod(opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, secondPod) + node2 := ExpectScheduled(ctx, env.Client, secondPod) // must create a new node Expect(node1.Name).ToNot(Equal(node2.Name)) }) @@ -1842,7 +1776,8 @@ var _ = Describe("In-Flight Nodes", func() { // scheduling in multiple batches random sets of pods for i := 0; i < 10; i++ { - initialPods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, MakePods(rand.Intn(10), opts)...) + initialPods := MakePods(rand.Intn(10), opts) + ExpectProvisioned(ctx, env.Client, cluster, prov, initialPods...) 
for _, pod := range initialPods { node := ExpectScheduled(ctx, env.Client, pod) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node)) @@ -1876,7 +1811,7 @@ var _ = Describe("In-Flight Nodes", func() { ExpectApplied(ctx, env.Client, provisioner) pod := test.UnschedulablePod(opts) - ExpectProvisionedNoBinding(ctx, env.Client, provisioningController, prov, pod) + ExpectProvisionedNoBinding(ctx, env.Client, prov, pod) var nodes v1.NodeList Expect(env.Client.List(ctx, &nodes)).To(Succeed()) Expect(nodes.Items).To(HaveLen(1)) @@ -1884,7 +1819,7 @@ var _ = Describe("In-Flight Nodes", func() { pod.Status.Conditions = []v1.PodCondition{{Type: v1.PodScheduled, Reason: v1.PodReasonUnschedulable, Status: v1.ConditionFalse}} ExpectApplied(ctx, env.Client, pod) - ExpectProvisionedNoBinding(ctx, env.Client, provisioningController, prov, pod) + ExpectProvisionedNoBinding(ctx, env.Client, prov, pod) Expect(env.Client.List(ctx, &nodes)).To(Succeed()) // shouldn't create a second node Expect(nodes.Items).To(HaveLen(1)) @@ -1905,8 +1840,9 @@ var _ = Describe("No Pre-Binding", func() { Expect(nodeList.Items).To(HaveLen(0)) ExpectApplied(ctx, env.Client, provisioner) - initialPod := ExpectProvisionedNoBinding(ctx, env.Client, provisioningController, prov, test.UnschedulablePod(opts)) - ExpectNotScheduled(ctx, env.Client, initialPod[0]) + initialPod := test.UnschedulablePod(opts) + ExpectProvisionedNoBinding(ctx, env.Client, prov, initialPod) + ExpectNotScheduled(ctx, env.Client, initialPod) // should launch a single node Expect(env.Client.List(ctx, &nodeList)).To(Succeed()) @@ -1914,8 +1850,9 @@ var _ = Describe("No Pre-Binding", func() { node1 := &nodeList.Items[0] ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1)) - secondPod := ExpectProvisionedNoBinding(ctx, env.Client, provisioningController, prov, test.UnschedulablePod(opts)) - ExpectNotScheduled(ctx, env.Client, secondPod[0]) + secondPod := test.UnschedulablePod(opts) + ExpectProvisionedNoBinding(ctx, env.Client, prov, secondPod) + ExpectNotScheduled(ctx, env.Client, secondPod) // shouldn't create a second node as it can bind to the existingNodes node Expect(env.Client.List(ctx, &nodeList)).To(Succeed()) Expect(nodeList.Items).To(HaveLen(1)) @@ -1935,8 +1872,9 @@ var _ = Describe("No Pre-Binding", func() { Expect(nodeList.Items).To(HaveLen(0)) ExpectApplied(ctx, env.Client, provisioner) - initialPod := ExpectProvisionedNoBinding(ctx, env.Client, provisioningController, prov, test.UnschedulablePod(opts)) - ExpectNotScheduled(ctx, env.Client, initialPod[0]) + initialPod := test.UnschedulablePod(opts) + ExpectProvisionedNoBinding(ctx, env.Client, prov, initialPod) + ExpectNotScheduled(ctx, env.Client, initialPod) // should launch a single node Expect(env.Client.List(ctx, &nodeList)).To(Succeed()) @@ -1954,8 +1892,9 @@ var _ = Describe("No Pre-Binding", func() { ExpectApplied(ctx, env.Client, node1) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1)) - secondPod := ExpectProvisionedNoBinding(ctx, env.Client, provisioningController, prov, test.UnschedulablePod(opts)) - ExpectNotScheduled(ctx, env.Client, secondPod[0]) + secondPod := test.UnschedulablePod(opts) + ExpectProvisionedNoBinding(ctx, env.Client, prov, secondPod) + ExpectNotScheduled(ctx, env.Client, secondPod) // shouldn't create a second node as it can bind to the existingNodes node Expect(env.Client.List(ctx, &nodeList)).To(Succeed()) Expect(nodeList.Items).To(HaveLen(1)) @@ -1976,7 +1915,7 @@ var _ 
= Describe("No Pre-Binding", func() { }}, }) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisionedNoBinding(ctx, env.Client, provisioningController, prov, pods[0]) + ExpectProvisionedNoBinding(ctx, env.Client, prov, pods[0]) var nodeList v1.NodeList Expect(env.Client.List(ctx, &nodeList)).To(Succeed()) for i := range nodeList.Items { @@ -1985,7 +1924,7 @@ var _ = Describe("No Pre-Binding", func() { // the second pod can schedule against the in-flight node, but for that to work we need to be careful // in how we fulfill the self-affinity by taking the existing node's domain as a preference over any // random viable domain - ExpectProvisionedNoBinding(ctx, env.Client, provisioningController, prov, pods[1]) + ExpectProvisionedNoBinding(ctx, env.Client, prov, pods[1]) Expect(env.Client.List(ctx, &nodeList)).To(Succeed()) Expect(nodeList.Items).To(HaveLen(1)) }) @@ -2007,8 +1946,9 @@ var _ = Describe("VolumeUsage", func() { provisioner.Spec.Limits = nil ExpectApplied(ctx, env.Client, provisioner) - initialPods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node := ExpectScheduled(ctx, env.Client, initialPods[0]) + initialPod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, initialPod) + node := ExpectScheduled(ctx, env.Client, initialPod) csiNode := &storagev1.CSINode{ ObjectMeta: metav1.ObjectMeta{ Name: node.Name, @@ -2049,7 +1989,7 @@ var _ = Describe("VolumeUsage", func() { PersistentVolumeClaims: []string{pvcA.Name, pvcB.Name}, })) } - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) var nodeList v1.NodeList Expect(env.Client.List(ctx, &nodeList)).To(Succeed()) // we need to create a new node as the in-flight one can only contain 5 pods due to the CSINode volume limit @@ -2070,8 +2010,9 @@ var _ = Describe("VolumeUsage", func() { provisioner.Spec.Limits = nil ExpectApplied(ctx, env.Client, provisioner) - initialPods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node := ExpectScheduled(ctx, env.Client, initialPods[0]) + initialPod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, initialPod) + node := ExpectScheduled(ctx, env.Client, initialPod) csiNode := &storagev1.CSINode{ ObjectMeta: metav1.ObjectMeta{ Name: node.Name, @@ -2115,7 +2056,7 @@ var _ = Describe("VolumeUsage", func() { })) } ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) 
var nodeList v1.NodeList Expect(env.Client.List(ctx, &nodeList)).To(Succeed()) // 100 of the same PVC should all be schedulable on the same node @@ -2136,8 +2077,9 @@ var _ = Describe("VolumeUsage", func() { provisioner.Spec.Limits = nil ExpectApplied(ctx, env.Client, provisioner) - initialPods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node := ExpectScheduled(ctx, env.Client, initialPods[0]) + initialPod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, initialPod) + node := ExpectScheduled(ctx, env.Client, initialPod) csiNode := &storagev1.CSINode{ ObjectMeta: metav1.ObjectMeta{ Name: node.Name, @@ -2182,7 +2124,7 @@ var _ = Describe("VolumeUsage", func() { })) } ExpectApplied(ctx, env.Client, provisioner) - _ = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pods...) + _ = ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) var nodeList v1.NodeList Expect(env.Client.List(ctx, &nodeList)).To(Succeed()) @@ -2203,8 +2145,9 @@ var _ = Describe("VolumeUsage", func() { provisioner.Spec.Limits = nil ExpectApplied(ctx, env.Client, provisioner) - initialPods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) - node := ExpectScheduled(ctx, env.Client, initialPods[0]) + initialPod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, initialPod) + node := ExpectScheduled(ctx, env.Client, initialPod) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node)) pv := test.PersistentVolume(test.PersistentVolumeOptions{ @@ -2231,7 +2174,7 @@ var _ = Describe("VolumeUsage", func() { })) } ExpectApplied(ctx, env.Client, provisioner) - _ = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pods...) + _ = ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) var nodeList v1.NodeList Expect(env.Client.List(ctx, &nodeList)).To(Succeed()) @@ -2262,7 +2205,7 @@ var _ = Describe("VolumeUsage", func() { }, }) ExpectApplied(ctx, env.Client, provisioner) - _ = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod) + _ = ExpectProvisioned(ctx, env.Client, cluster, prov, pod) var nodeList v1.NodeList Expect(env.Client.List(ctx, &nodeList)).To(Succeed()) diff --git a/pkg/controllers/provisioning/scheduling/topology_test.go b/pkg/controllers/provisioning/scheduling/topology_test.go index b3524b260e..8e72fa5ba0 100644 --- a/pkg/controllers/provisioning/scheduling/topology_test.go +++ b/pkg/controllers/provisioning/scheduling/topology_test.go @@ -18,7 +18,6 @@ import ( "context" "time" - . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -26,6 +25,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" + . 
"github.com/onsi/ginkgo/v2" + "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/cloudprovider/fake" "github.com/aws/karpenter-core/pkg/test" @@ -37,14 +38,18 @@ var _ = Describe("Topology", func() { It("should ignore unknown topology keys", func() { ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( - test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: []v1.TopologySpreadConstraint{{ - TopologyKey: "unknown", - WhenUnsatisfiable: v1.DoNotSchedule, - LabelSelector: &metav1.LabelSelector{MatchLabels: labels}, - MaxSkew: 1, - }}}, - ), test.UnschedulablePod()) + pods := []*v1.Pod{ + test.UnschedulablePod( + test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: []v1.TopologySpreadConstraint{{ + TopologyKey: "unknown", + WhenUnsatisfiable: v1.DoNotSchedule, + LabelSelector: &metav1.LabelSelector{MatchLabels: labels}, + MaxSkew: 1, + }}}, + ), + test.UnschedulablePod(), + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) ExpectNotScheduled(ctx, env.Client, pods[0]) ExpectScheduled(ctx, env.Client, pods[1]) }) @@ -57,7 +62,7 @@ var _ = Describe("Topology", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(2, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology})...) ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(2)) }) @@ -71,7 +76,7 @@ var _ = Describe("Topology", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), @@ -95,7 +100,7 @@ var _ = Describe("Topology", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), @@ -113,7 +118,7 @@ var _ = Describe("Topology", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: 
metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), @@ -131,7 +136,7 @@ var _ = Describe("Topology", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), @@ -149,14 +154,14 @@ var _ = Describe("Topology", func() { v1.ResourceCPU: resource.MustParse("1.1"), }, } - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, - test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, - ResourceRequirements: rr, - NodeSelector: map[string]string{ - v1.LabelTopologyZone: "test-zone-3", - }, - })) - ExpectScheduled(ctx, env.Client, pods[0]) + pod := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, + ResourceRequirements: rr, + NodeSelector: map[string]string{ + v1.LabelTopologyZone: "test-zone-3", + }, + }) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + ExpectScheduled(ctx, env.Client, pod) provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2"}}} @@ -167,7 +172,7 @@ var _ = Describe("Topology", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, ResourceRequirements: rr, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, ResourceRequirements: rr, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, ResourceRequirements: rr, TopologySpreadConstraints: topology}), @@ -195,7 +200,7 @@ var _ = Describe("Topology", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}}} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, ResourceRequirements: rr, TopologySpreadConstraints: topology})) ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1)) @@ -204,7 +209,7 @@ var _ = Describe("Topology", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}}} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, ResourceRequirements: rr, TopologySpreadConstraints: topology})) ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 
1)) @@ -213,7 +218,7 @@ var _ = Describe("Topology", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-3"}}} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(10, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, ResourceRequirements: rr, TopologySpreadConstraints: topology})..., ) @@ -243,7 +248,8 @@ var _ = Describe("Topology", func() { } // Spread 9 pods ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, createPods(9)...) + pods := createPods(9) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(3, 3, 3)) // Delete pods to create a skew @@ -256,7 +262,7 @@ var _ = Describe("Topology", func() { ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(3)) // Create 3 more pods, skew should recover - _ = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, createPods(3)...) + ExpectProvisioned(ctx, env.Client, cluster, prov, createPods(3)...) ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(3, 1, 2)) }) It("should not violate max-skew when unsat = do not schedule", func() { @@ -275,7 +281,7 @@ var _ = Describe("Topology", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}}} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, ResourceRequirements: rr, TopologySpreadConstraints: topology})) ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1)) @@ -284,7 +290,7 @@ var _ = Describe("Topology", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2", "test-zone-3"}}} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(10, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, ResourceRequirements: rr, TopologySpreadConstraints: topology})..., ) @@ -308,14 +314,14 @@ var _ = Describe("Topology", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}}} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, ResourceRequirements: rr})) // now only allow scheduling pods on zone-2 and zone-3 provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2", "test-zone-3"}}} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, 
prov, MakePods(10, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology, ResourceRequirements: rr})..., ) @@ -339,7 +345,7 @@ var _ = Describe("Topology", func() { ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(firstNode)) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(secondNode)) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(thirdNode)) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.Pod(test.PodOptions{NodeName: firstNode.Name}), // ignored, missing labels test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}}), // ignored, pending test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, NodeName: thirdNode.Name}), // ignored, no domain on node @@ -364,7 +370,7 @@ var _ = Describe("Topology", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(), ) ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1)) @@ -377,8 +383,9 @@ var _ = Describe("Topology", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, - MakePods(5, test.PodOptions{TopologySpreadConstraints: topology})..., + pods := MakePods(5, test.PodOptions{TopologySpreadConstraints: topology}) + ExpectProvisioned(ctx, env.Client, cluster, prov, + pods..., ) // This is weird, but the topology label selector is used for determining domain counts. The pod that // owns the topology is what the spread actually applies to. 
In this test case, there are no pods matching @@ -404,7 +411,7 @@ var _ = Describe("Topology", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), @@ -420,7 +427,7 @@ var _ = Describe("Topology", func() { MaxSkew: 4, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), @@ -451,11 +458,12 @@ var _ = Describe("Topology", func() { } ExpectApplied(ctx, env.Client, provisioner) - scheduled := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + pods := []*v1.Pod{ test.UnschedulablePod(spreadPod("app1")), test.UnschedulablePod(spreadPod("app1")), - test.UnschedulablePod(spreadPod("app2")), test.UnschedulablePod(spreadPod("app2"))) - - for _, p := range scheduled { + test.UnschedulablePod(spreadPod("app2")), test.UnschedulablePod(spreadPod("app2")), + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) + for _, p := range pods { ExpectScheduled(ctx, env.Client, p) } nodes := v1.NodeList{} @@ -493,11 +501,12 @@ var _ = Describe("Topology", func() { } ExpectApplied(ctx, env.Client, provisioner) - scheduled := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + pods := []*v1.Pod{ test.UnschedulablePod(spreadPod("app1", v1alpha5.ArchitectureAmd64)), test.UnschedulablePod(spreadPod("app1", v1alpha5.ArchitectureAmd64)), - test.UnschedulablePod(spreadPod("app2", v1alpha5.ArchitectureArm64)), test.UnschedulablePod(spreadPod("app2", v1alpha5.ArchitectureArm64))) - - for _, p := range scheduled { + test.UnschedulablePod(spreadPod("app2", v1alpha5.ArchitectureArm64)), test.UnschedulablePod(spreadPod("app2", v1alpha5.ArchitectureArm64)), + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) 
+ for _, p := range pods { ExpectScheduled(ctx, env.Client, p) } nodes := v1.NodeList{} @@ -516,7 +525,7 @@ var _ = Describe("Topology", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), @@ -534,7 +543,7 @@ var _ = Describe("Topology", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}), @@ -560,7 +569,7 @@ var _ = Describe("Topology", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: v1alpha5.LabelCapacityType, Operator: v1.NodeSelectorOpIn, Values: []string{v1alpha5.CapacityTypeSpot}}} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, ResourceRequirements: rr, TopologySpreadConstraints: topology})) ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1)) @@ -569,7 +578,7 @@ var _ = Describe("Topology", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: v1alpha5.LabelCapacityType, Operator: v1.NodeSelectorOpIn, Values: []string{v1alpha5.CapacityTypeOnDemand}}} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(5, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, ResourceRequirements: rr, TopologySpreadConstraints: topology})..., ) @@ -592,7 +601,7 @@ var _ = Describe("Topology", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: v1alpha5.LabelCapacityType, Operator: v1.NodeSelectorOpIn, Values: []string{v1alpha5.CapacityTypeSpot}}} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, ResourceRequirements: rr, TopologySpreadConstraints: topology})) ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1)) @@ -600,7 +609,7 @@ var _ = Describe("Topology", func() { provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{ {Key: v1alpha5.LabelCapacityType, Operator: v1.NodeSelectorOpIn, Values: []string{v1alpha5.CapacityTypeOnDemand}}} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + 
ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(5, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, ResourceRequirements: rr, TopologySpreadConstraints: topology})..., ) @@ -623,7 +632,7 @@ var _ = Describe("Topology", func() { ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(firstNode)) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(secondNode)) ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(thirdNode)) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.Pod(test.PodOptions{NodeName: firstNode.Name}), // ignored, missing labels test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}}), // ignored, pending test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, NodeName: thirdNode.Name}), // ignored, no domain on node @@ -648,7 +657,7 @@ var _ = Describe("Topology", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(), ) ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1)) @@ -661,9 +670,8 @@ var _ = Describe("Topology", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, - MakePods(5, test.PodOptions{TopologySpreadConstraints: topology})..., - ) + pods := MakePods(5, test.PodOptions{TopologySpreadConstraints: topology}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) // This is weird, but the topology label selector is used for determining domain counts. The pod that // owns the topology is what the spread actually applies to. In this test case, there are no pods matching // the label selector, so the max skew is zero. This means we can pack all the pods onto the same node since @@ -679,15 +687,16 @@ var _ = Describe("Topology", func() { }) It("should balance pods across capacity-types (node required affinity constrained)", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, MakePods(1, test.PodOptions{ + pods := MakePods(1, test.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, NodeRequirements: []v1.NodeSelectorRequirement{ // launch this on-demand pod in zone-1 {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}}, {Key: v1alpha5.LabelCapacityType, Operator: v1.NodeSelectorOpIn, Values: []string{"on-demand"}}, }, - })...) - ExpectScheduled(ctx, env.Client, pod[0]) + }) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) + ExpectScheduled(ctx, env.Client, pods[0]) topology := []v1.TopologySpreadConstraint{{ TopologyKey: v1alpha5.LabelCapacityType, @@ -700,7 +709,7 @@ var _ = Describe("Topology", func() { // spot node. This doesn't violate the max-skew of 1 as the node selector requirement here excludes the // existing on-demand pod from counting within this topology. 
ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(5, test.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, // limit our provisioner to only creating spot nodes @@ -718,7 +727,7 @@ var _ = Describe("Topology", func() { Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")}, } ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{ + pod := test.UnschedulablePod(test.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, NodeSelector: map[string]string{v1.LabelInstanceTypeStable: "single-pod-instance-type"}, NodeRequirements: []v1.NodeSelectorRequirement{ @@ -728,8 +737,8 @@ var _ = Describe("Topology", func() { Values: []string{"on-demand"}, }, }, - }))[0] - + }) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectScheduled(ctx, env.Client, pod) topology := []v1.TopologySpreadConstraint{{ @@ -747,7 +756,7 @@ var _ = Describe("Topology", func() { // since there is no node selector on this pod, the topology can see the single on-demand node that already // exists and that limits us to scheduling 2 more spot pods before we would violate max-skew ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(5, test.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, ResourceRequirements: rr, @@ -761,7 +770,7 @@ var _ = Describe("Topology", func() { Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")}, } ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{ + pod := test.UnschedulablePod(test.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, NodeSelector: map[string]string{v1.LabelInstanceTypeStable: "single-pod-instance-type"}, NodeRequirements: []v1.NodeSelectorRequirement{ @@ -771,9 +780,10 @@ var _ = Describe("Topology", func() { Values: []string{"amd64"}, }, }, - })) + }) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) - ExpectScheduled(ctx, env.Client, pod[0]) + ExpectScheduled(ctx, env.Client, pod) topology := []v1.TopologySpreadConstraint{{ TopologyKey: v1.LabelArchStable, @@ -789,7 +799,7 @@ var _ = Describe("Topology", func() { // since there is no node selector on this pod, the topology can see the single arm64 node that already // exists and that limits us to scheduling 2 more spot pods before we would violate max-skew ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(5, test.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, ResourceRequirements: rr, @@ -814,28 +824,28 @@ var _ = Describe("Topology", func() { MaxSkew: 3, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(2, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology})..., ) ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 1)) ExpectSkew(ctx, env.Client, "default", 
&topology[1]).ToNot(ContainElements(BeNumerically(">", 3))) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(3, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology})..., ) ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(2, 2, 1)) ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 3))) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(5, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology})..., ) ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(4, 3, 3)) ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 3))) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(11, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology})..., ) ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(7, 7, 7)) @@ -878,10 +888,11 @@ var _ = Describe("Topology", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, spotProv, onDemandProv) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, MakePods(20, test.PodOptions{ + pods := MakePods(20, test.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology, - })...) + }) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) 
for _, p := range pods { ExpectScheduled(ctx, env.Client, p) } @@ -922,7 +933,7 @@ var _ = Describe("Topology", func() { } ExpectApplied(ctx, env.Client, provisioner, provisionerB) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(10, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology})..., ) @@ -945,28 +956,28 @@ var _ = Describe("Topology", func() { MaxSkew: 3, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(2, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology})..., ) ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 1)) ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 3))) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(3, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology})..., ) ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(3, 2)) ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 3))) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(5, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology})..., ) ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(5, 5)) ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 3))) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(11, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology})..., ) ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(11, 10)) @@ -988,25 +999,25 @@ var _ = Describe("Topology", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(2, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology})..., ) ExpectSkew(ctx, env.Client, "default", &topology[0]).ToNot(ContainElements(BeNumerically(">", 1))) ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 1))) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(3, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology})..., ) ExpectSkew(ctx, env.Client, "default", &topology[0]).ToNot(ContainElements(BeNumerically(">", 3))) ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 2))) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(5, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, 
TopologySpreadConstraints: topology})..., ) ExpectSkew(ctx, env.Client, "default", &topology[0]).ToNot(ContainElements(BeNumerically(">", 5))) ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 4))) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(11, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology})..., ) ExpectSkew(ctx, env.Client, "default", &topology[0]).ToNot(ContainElements(BeNumerically(">", 11))) @@ -1040,7 +1051,7 @@ var _ = Describe("Topology", func() { for i := 1; i < 15; i++ { pods := MakePods(i, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) ExpectMaxSkew(ctx, env.Client, "default", &topology[0]).To(BeNumerically("<=", 1)) ExpectMaxSkew(ctx, env.Client, "default", &topology[1]).To(BeNumerically("<=", 2)) ExpectMaxSkew(ctx, env.Client, "default", &topology[2]).To(BeNumerically("<=", 3)) @@ -1052,7 +1063,7 @@ var _ = Describe("Topology", func() { }) // https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#interaction-with-node-affinity-and-node-selectors - Context("Combined Zonal Topology and Node Affinity", func() { + Context("Combined Zonal Topology and Machine Affinity", func() { It("should limit spread options by nodeSelector", func() { topology := []v1.TopologySpreadConstraint{{ TopologyKey: v1.LabelTopologyZone, @@ -1061,7 +1072,7 @@ var _ = Describe("Topology", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, append( MakePods(5, test.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, @@ -1087,7 +1098,7 @@ var _ = Describe("Topology", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(10, test.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology, @@ -1110,7 +1121,7 @@ var _ = Describe("Topology", func() { }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(6, test.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology, @@ -1125,7 +1136,7 @@ var _ = Describe("Topology", func() { {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3"}}} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, MakePods(1, test.PodOptions{ + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(1, test.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology, NodeRequirements: []v1.NodeSelectorRequirement{{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{ @@ -1137,7 +1148,7 @@ var _ = Describe("Topology", func() { ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(3, 
3, 1)) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(5, test.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology, @@ -1148,7 +1159,7 @@ var _ = Describe("Topology", func() { }) // https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#interaction-with-node-affinity-and-node-selectors - Context("Combined Capacity Type Topology and Node Affinity", func() { + Context("Combined Capacity Type Topology and Machine Affinity", func() { It("should limit spread options by nodeSelector", func() { topology := []v1.TopologySpreadConstraint{{ TopologyKey: v1alpha5.LabelCapacityType, @@ -1157,7 +1168,7 @@ var _ = Describe("Topology", func() { MaxSkew: 1, }} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, append( MakePods(5, test.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, @@ -1183,7 +1194,7 @@ var _ = Describe("Topology", func() { // need to limit the rules to spot or else it will know that on-demand has 0 pods and won't violate the max-skew ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(3, test.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology, @@ -1195,7 +1206,7 @@ var _ = Describe("Topology", func() { // open the rules back to up so it can see all capacity types ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, MakePods(1, test.PodOptions{ + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(1, test.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology, NodeRequirements: []v1.NodeSelectorRequirement{ @@ -1207,7 +1218,7 @@ var _ = Describe("Topology", func() { ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(3, 1)) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + ExpectProvisioned(ctx, env.Client, cluster, prov, MakePods(5, test.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology, @@ -1221,10 +1232,11 @@ var _ = Describe("Topology", func() { It("should schedule a pod with empty pod affinity and anti-affinity", func() { ExpectApplied(ctx, env.Client) ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{ + pod := test.UnschedulablePod(test.PodOptions{ PodRequirements: []v1.PodAffinityTerm{}, PodAntiRequirements: []v1.PodAffinityTerm{}, - }))[0] + }) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectScheduled(ctx, env.Client, pod) }) It("should respect pod affinity (hostname)", func() { @@ -1255,7 +1267,7 @@ var _ = Describe("Topology", func() { pods = append(pods, affPod2) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) 
n1 := ExpectScheduled(ctx, env.Client, affPod1) n2 := ExpectScheduled(ctx, env.Client, affPod2) // should be scheduled on the same node @@ -1296,7 +1308,7 @@ var _ = Describe("Topology", func() { pods := []*v1.Pod{affPod1, affPod2} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) n1 := ExpectScheduled(ctx, env.Client, affPod1) n2 := ExpectScheduled(ctx, env.Client, affPod2) // should be scheduled on a node with the same arch @@ -1320,7 +1332,7 @@ var _ = Describe("Topology", func() { }) ExpectApplied(ctx, env.Client, provisioner) - pods = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) nodeNames := map[string]struct{}{} for _, p := range pods { n := ExpectScheduled(ctx, env.Client, p) @@ -1344,7 +1356,8 @@ var _ = Describe("Topology", func() { }) } ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, createPods()...) + pods := createPods() + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) nodeNames := map[string]struct{}{} unscheduledCount := 0 scheduledCount := 0 @@ -1363,7 +1376,8 @@ var _ = Describe("Topology", func() { Expect(unscheduledCount).To(BeNumerically("==", 5)) // and pods in a different batch should not schedule as well even if the node is not ready yet - pods = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, createPods()...) + pods = createPods() + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) for _, p := range pods { ExpectNotScheduled(ctx, env.Client, p) } @@ -1373,7 +1387,7 @@ var _ = Describe("Topology", func() { // put one pod in test-zone-1, this does affect pod affinity even though we have different node selectors. // The node selector and required node affinity restrictions to topology counting only apply to topology spread. ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{ + ExpectProvisioned(ctx, env.Client, cluster, prov, test.UnschedulablePod(test.PodOptions{ ObjectMeta: metav1.ObjectMeta{ Labels: affLabels, }, @@ -1406,7 +1420,7 @@ var _ = Describe("Topology", func() { TopologyKey: v1.LabelHostname, }}, }) - pods = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) for _, p := range pods { // none of this should schedule ExpectNotScheduled(ctx, env.Client, p) @@ -1428,7 +1442,7 @@ var _ = Describe("Topology", func() { }) ExpectApplied(ctx, env.Client, provisioner) - pods = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) nodeNames := map[string]struct{}{} for _, p := range pods { n := ExpectScheduled(ctx, env.Client, p) @@ -1458,7 +1472,7 @@ var _ = Describe("Topology", func() { }, }) ExpectApplied(ctx, env.Client, provisioner) - pods = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) 
nodeNames := map[string]struct{}{} for _, p := range pods { n := ExpectScheduled(ctx, env.Client, p) @@ -1494,7 +1508,7 @@ var _ = Describe("Topology", func() { pods = append(pods, affPod2) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) // should be scheduled as the pod it has affinity to doesn't exist, but it's only a preference and not a // hard constraints ExpectScheduled(ctx, env.Client, affPod2) @@ -1527,7 +1541,7 @@ var _ = Describe("Topology", func() { pods = append(pods, affPods...) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) for _, aff := range affPods { ExpectScheduled(ctx, env.Client, aff) } @@ -1548,7 +1562,7 @@ var _ = Describe("Topology", func() { TopologyKey: v1.LabelHostname, }}}) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, affPod2, affPod1) + ExpectProvisioned(ctx, env.Client, cluster, prov, affPod2, affPod1) n1 := ExpectScheduled(ctx, env.Client, affPod1) n2 := ExpectScheduled(ctx, env.Client, affPod2) // should not be scheduled on the same node @@ -1585,7 +1599,7 @@ var _ = Describe("Topology", func() { }}}) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, zone1Pod, zone2Pod, zone3Pod, affPod) + ExpectProvisioned(ctx, env.Client, cluster, prov, zone1Pod, zone2Pod, zone3Pod, affPod) // the three larger zone specific pods should get scheduled first due to first fit descending onto one // node per zone. ExpectScheduled(ctx, env.Client, zone1Pod) @@ -1610,7 +1624,7 @@ var _ = Describe("Topology", func() { }}}) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod, affPod) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod, affPod) // the pod we need to avoid schedules first, but we don't know where. ExpectScheduled(ctx, env.Client, pod) // the pod with anti-affinity @@ -1653,7 +1667,7 @@ var _ = Describe("Topology", func() { pods := []*v1.Pod{affPod1, affPod2} ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) 
n1 := ExpectScheduled(ctx, env.Client, affPod1) n2 := ExpectScheduled(ctx, env.Client, affPod2) // should not be scheduled on nodes with the same arch @@ -1691,7 +1705,7 @@ var _ = Describe("Topology", func() { affPod := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: affLabels}}) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, zone1Pod, zone2Pod, zone3Pod, affPod) + ExpectProvisioned(ctx, env.Client, cluster, prov, zone1Pod, zone2Pod, zone3Pod, affPod) // three pods with anti-affinity will schedule first due to first fit-descending ExpectScheduled(ctx, env.Client, zone1Pod) ExpectScheduled(ctx, env.Client, zone2Pod) @@ -1726,7 +1740,7 @@ var _ = Describe("Topology", func() { affPod := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: affLabels}}) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, zone1Pod, zone2Pod, zone3Pod, affPod) + ExpectProvisioned(ctx, env.Client, cluster, prov, zone1Pod, zone2Pod, zone3Pod, affPod) // three pods with anti-affinity will schedule first due to first fit-descending ExpectScheduled(ctx, env.Client, zone1Pod) ExpectScheduled(ctx, env.Client, zone2Pod) @@ -1753,7 +1767,7 @@ var _ = Describe("Topology", func() { affPod := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: affLabels}}) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, zoneAnywherePod, affPod) + ExpectProvisioned(ctx, env.Client, cluster, prov, zoneAnywherePod, affPod) // the pod with anti-affinity will schedule first due to first fit-descending, but we don't know which zone it landed in node1 := ExpectScheduled(ctx, env.Client, zoneAnywherePod) @@ -1762,7 +1776,7 @@ var _ = Describe("Topology", func() { // a second batching will now allow the pod to schedule as the zoneAnywherePod has been committed to a zone // by the actual node creation - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, affPod) + ExpectProvisioned(ctx, env.Client, cluster, prov, affPod) node2 := ExpectScheduled(ctx, env.Client, affPod) Expect(node1.Labels[v1.LabelTopologyZone]).ToNot(Equal(node2.Labels[v1.LabelTopologyZone])) @@ -1796,7 +1810,7 @@ var _ = Describe("Topology", func() { // provision these so we get three nodes that exist in the cluster with anti-affinity to a pod that we will // then try to schedule ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, zone1Pod, zone2Pod, zone3Pod) + ExpectProvisioned(ctx, env.Client, cluster, prov, zone1Pod, zone2Pod, zone3Pod) node1 := ExpectScheduled(ctx, env.Client, zone1Pod) node2 := ExpectScheduled(ctx, env.Client, zone2Pod) node3 := ExpectScheduled(ctx, env.Client, zone3Pod) @@ -1814,7 +1828,7 @@ var _ = Describe("Topology", func() { // this pod with no anti-affinity rules can't schedule. 
It has no anti-affinity rules, but every zone has an // existing pod (not from this batch) with anti-affinity rules that prevent it from scheduling - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, affPod) + ExpectProvisioned(ctx, env.Client, cluster, prov, affPod) ExpectNotScheduled(ctx, env.Client, affPod) }) It("should violate preferred pod anti-affinity on zone (inverse w/existing nodes)", func() { @@ -1851,7 +1865,7 @@ var _ = Describe("Topology", func() { // provision these so we get three nodes that exist in the cluster with anti-affinity to a pod that we will // then try to schedule ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, zone1Pod, zone2Pod, zone3Pod) + ExpectProvisioned(ctx, env.Client, cluster, prov, zone1Pod, zone2Pod, zone3Pod) node1 := ExpectScheduled(ctx, env.Client, zone1Pod) node2 := ExpectScheduled(ctx, env.Client, zone2Pod) node3 := ExpectScheduled(ctx, env.Client, zone3Pod) @@ -1864,7 +1878,7 @@ var _ = Describe("Topology", func() { ExpectReconcileSucceeded(ctx, podStateController, client.ObjectKeyFromObject(zone3Pod)) // this pod with no anti-affinity rules can schedule, though it couldn't if the anti-affinity were required - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, affPod) + ExpectProvisioned(ctx, env.Client, cluster, prov, affPod) ExpectScheduled(ctx, env.Client, affPod) }) It("should allow violation of a pod affinity preference with a conflicting required constraint", func() { @@ -1893,7 +1907,8 @@ var _ = Describe("Topology", func() { }, }}}) ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, append(affPods, affPod1)...) + pods := append(affPods, affPod1) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) // all pods should be scheduled since the affinity term is just a preference for _, pod := range pods { ExpectScheduled(ctx, env.Client, pod) @@ -1925,24 +1940,24 @@ var _ = Describe("Topology", func() { // one pod pod will schedule ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, createPods()...) + ExpectProvisioned(ctx, env.Client, cluster, prov, createPods()...) ExpectSkew(ctx, env.Client, "default", top).To(ConsistOf(1)) // delete all of the unscheduled ones as provisioning will only bind pods passed into the provisioning call // the scheduler looks at all pods though, so it may assume a pod from this batch schedules and no others do ExpectDeleteAllUnscheduledPods(ctx, env.Client) // second pod in a second zone - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, createPods()...) + ExpectProvisioned(ctx, env.Client, cluster, prov, createPods()...) ExpectSkew(ctx, env.Client, "default", top).To(ConsistOf(1, 1)) ExpectDeleteAllUnscheduledPods(ctx, env.Client) // third pod in the last zone - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, createPods()...) + ExpectProvisioned(ctx, env.Client, cluster, prov, createPods()...) ExpectSkew(ctx, env.Client, "default", top).To(ConsistOf(1, 1, 1)) ExpectDeleteAllUnscheduledPods(ctx, env.Client) // and nothing else can schedule - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, createPods()...) + ExpectProvisioned(ctx, env.Client, cluster, prov, createPods()...) 
ExpectSkew(ctx, env.Client, "default", top).To(ConsistOf(1, 1, 1)) ExpectDeleteAllUnscheduledPods(ctx, env.Client) }) @@ -1957,9 +1972,9 @@ var _ = Describe("Topology", func() { }}}) ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, affPods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, affPods...) // the pod we have affinity to is not in the cluster, so all of these pods are unschedulable - for _, p := range pods { + for _, p := range affPods { ExpectNotScheduled(ctx, env.Client, p) } }) @@ -1979,7 +1994,7 @@ var _ = Describe("Topology", func() { }}}) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, append(affPods, targetPod)...) + ExpectProvisioned(ctx, env.Client, cluster, prov, append(affPods, targetPod)...) top := &v1.TopologySpreadConstraint{TopologyKey: v1.LabelTopologyZone} // these pods can't schedule as the pod they have affinity to isn't limited to any particular zone for i := range affPods { @@ -1990,7 +2005,7 @@ var _ = Describe("Topology", func() { // now that targetPod has been scheduled to a node, it's zone is committed and the pods with affinity to it // should schedule in the same zone - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, affPods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, affPods...) for _, pod := range affPods { ExpectScheduled(ctx, env.Client, pod) } @@ -2021,7 +2036,7 @@ var _ = Describe("Topology", func() { affPods = append(affPods, affPod1) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, affPods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, affPods...) top := &v1.TopologySpreadConstraint{TopologyKey: v1.LabelTopologyZone} ExpectSkew(ctx, env.Client, "default", top).To(ConsistOf(11)) }) @@ -2033,7 +2048,7 @@ var _ = Describe("Topology", func() { for i := 0; i < 50; i++ { ExpectApplied(ctx, env.Client, provisioner.DeepCopy()) // we have to schedule DB -> Web -> Cache -> UI in that order or else there are pod affinity violations - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + pods := []*v1.Pod{ test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: dbLabels}}), test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: webLabels}, PodRequirements: []v1.PodAffinityTerm{{ @@ -2051,7 +2066,8 @@ var _ = Describe("Topology", func() { LabelSelector: &metav1.LabelSelector{MatchLabels: cacheLabels}, TopologyKey: v1.LabelHostname}, }}), - ) + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) 
for i := range pods { ExpectScheduled(ctx, env.Client, pods[i]) } @@ -2065,16 +2081,15 @@ var _ = Describe("Topology", func() { ExpectApplied(ctx, env.Client, provisioner) // this pods wants to schedule with a non-existent pod, this test just ensures that the scheduling loop // doesn't infinite loop - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, - test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: dbLabels}, - PodRequirements: []v1.PodAffinityTerm{ - { - LabelSelector: &metav1.LabelSelector{MatchLabels: webLabels}, - TopologyKey: v1.LabelHostname, - }, - }}), - ) - ExpectNotScheduled(ctx, env.Client, pods[0]) + pod := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: dbLabels}, + PodRequirements: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{MatchLabels: webLabels}, + TopologyKey: v1.LabelHostname, + }, + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + ExpectNotScheduled(ctx, env.Client, pod) }) It("should filter pod affinity topologies by namespace, no matching pods", func() { topology := []v1.TopologySpreadConstraint{{ @@ -2106,7 +2121,7 @@ var _ = Describe("Topology", func() { pods = append(pods, affPod2) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) // the target pod gets scheduled ExpectScheduled(ctx, env.Client, affPod1) @@ -2147,7 +2162,7 @@ var _ = Describe("Topology", func() { pods = append(pods, affPod2) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) n1 := ExpectScheduled(ctx, env.Client, affPod1) n2 := ExpectScheduled(ctx, env.Client, affPod2) // should be scheduled on the same node @@ -2190,7 +2205,7 @@ var _ = Describe("Topology", func() { pods = append(pods, affPod2) ExpectApplied(ctx, env.Client, provisioner) - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pods...) + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) n1 := ExpectScheduled(ctx, env.Client, affPod1) n2 := ExpectScheduled(ctx, env.Client, affPod2) // should be scheduled on the same node due to the empty namespace selector @@ -2212,7 +2227,7 @@ var _ = Describe("Topology", func() { LabelSelector: &metav1.LabelSelector{MatchLabels: labels}, WhenUnsatisfiable: v1.DoNotSchedule, } - ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.Pods(10, test.UnscheduleablePodOptions(test.PodOptions{ + ExpectProvisioned(ctx, env.Client, cluster, prov, test.Pods(10, test.UnscheduleablePodOptions(test.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: []v1.TopologySpreadConstraint{topology}, }))...) 
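Aside (illustrative sketch, not a hunk in this patch): the recurring refactor across these test files is that ExpectProvisioned no longer takes the recorder and provisioningController and no longer returns the pods it bound, so each case now builds its pods up front, passes them to the helper together with cluster and prov, and asserts against the same variables afterwards. Assuming the helper signatures exactly as they appear in these hunks, a converted case reads roughly:

    It("should schedule a basic pod (sketch)", func() {
        ExpectApplied(ctx, env.Client, test.Provisioner())
        // Build the pod ourselves instead of indexing into ExpectProvisioned's return value.
        pod := test.UnschedulablePod()
        // Updated helper shape: cluster and provisioner are passed directly; nothing is returned.
        ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
        // Assert on the pod we created.
        ExpectScheduled(ctx, env.Client, pod)
    })

The no-binding variant follows the same shape; as the later suite_test hunks show, ExpectProvisionedNoBinding now returns the bindings it computed, so tests can compare node names directly instead of walking the recorder.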
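Relatedly (sketch only, assuming the record and events imports added elsewhere in this diff): the suites drop the shared test.EventRecorder and instead hand the provisioner a recorder built from client-go's FakeRecorder, e.g.:

    // Mirrors the BeforeSuite changes later in this diff: wrap a throwaway client-go recorder.
    recorder := events.NewRecorder(&record.FakeRecorder{})
    prov = provisioning.NewProvisioner(ctx, env.Client, corev1.NewForConfigOrDie(env.Config), recorder, cloudProvider, cluster)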
@@ -2235,32 +2250,37 @@ var _ = Describe("Taints", func() { It("should taint nodes with provisioner taints", func() { provisioner.Spec.Taints = []v1.Taint{{Key: "test", Value: "bar", Effect: v1.TaintEffectNoSchedule}} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{Tolerations: []v1.Toleration{{Effect: v1.TaintEffectNoSchedule, Operator: v1.TolerationOpExists}}}, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Spec.Taints).To(ContainElement(provisioner.Spec.Taints[0])) }) It("should schedule pods that tolerate provisioner constraints", func() { provisioner.Spec.Taints = []v1.Taint{{Key: "test-key", Value: "test-value", Effect: v1.TaintEffectNoSchedule}} ExpectApplied(ctx, env.Client, provisioner) - for _, pod := range ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + pods := []*v1.Pod{ // Tolerates with OpExists test.UnschedulablePod(test.PodOptions{Tolerations: []v1.Toleration{{Key: "test-key", Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoSchedule}}}), // Tolerates with OpEqual test.UnschedulablePod(test.PodOptions{Tolerations: []v1.Toleration{{Key: "test-key", Value: "test-value", Operator: v1.TolerationOpEqual, Effect: v1.TaintEffectNoSchedule}}}), - ) { + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) + for _, pod := range pods { ExpectScheduled(ctx, env.Client, pod) } ExpectApplied(ctx, env.Client, provisioner) - for _, pod := range ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + otherPods := []*v1.Pod{ // Missing toleration test.UnschedulablePod(), // key mismatch with OpExists test.UnschedulablePod(test.PodOptions{Tolerations: []v1.Toleration{{Key: "invalid", Operator: v1.TolerationOpExists}}}), // value mismatch test.UnschedulablePod(test.PodOptions{Tolerations: []v1.Toleration{{Key: "test-key", Operator: v1.TolerationOpEqual, Effect: v1.TaintEffectNoSchedule}}}), - ) { + } + ExpectProvisioned(ctx, env.Client, cluster, prov, otherPods...) 
+ for _, pod := range otherPods { ExpectNotScheduled(ctx, env.Client, pod) } }) @@ -2268,14 +2288,14 @@ var _ = Describe("Taints", func() { provisioner.Spec.StartupTaints = []v1.Taint{{Key: "ignore-me", Value: "nothing-to-see-here", Effect: v1.TaintEffectNoSchedule}} ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod())[0] + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectScheduled(ctx, env.Client, pod) }) It("should not generate taints for OpExists", func() { ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, - test.UnschedulablePod(test.PodOptions{Tolerations: []v1.Toleration{{Key: "test-key", Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoExecute}}}), - )[0] + pod := test.UnschedulablePod(test.PodOptions{Tolerations: []v1.Toleration{{Key: "test-key", Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoExecute}}}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Spec.Taints).To(HaveLen(1)) // Expect no taints generated beyond the default }) diff --git a/pkg/controllers/provisioning/scheduling/topologynodefilter.go b/pkg/controllers/provisioning/scheduling/topologynodefilter.go index 51f485dc36..0c198d286c 100644 --- a/pkg/controllers/provisioning/scheduling/topologynodefilter.go +++ b/pkg/controllers/provisioning/scheduling/topologynodefilter.go @@ -53,7 +53,7 @@ func (t TopologyNodeFilter) Matches(node *v1.Node) bool { } // MatchesRequirements returns true if the TopologyNodeFilter doesn't prohibit a node with the requirements from -// participating in the topology. This method allows checking the requirements from a scheduling.Node to see if the +// participating in the topology. This method allows checking the requirements from a scheduling.Machine to see if the // node we will soon create participates in this topology. 
func (t TopologyNodeFilter) MatchesRequirements(requirements scheduling.Requirements) bool { // no requirements, so it always matches diff --git a/pkg/controllers/provisioning/suite_test.go b/pkg/controllers/provisioning/suite_test.go index 29860ad8bd..d13177ea44 100644 --- a/pkg/controllers/provisioning/suite_test.go +++ b/pkg/controllers/provisioning/suite_test.go @@ -21,15 +21,15 @@ import ( "time" "github.com/samber/lo" - "knative.dev/pkg/ptr" - "sigs.k8s.io/controller-runtime/pkg/client" - v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/record" clock "k8s.io/utils/clock/testing" + "knative.dev/pkg/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/aws/karpenter-core/pkg/apis" "github.com/aws/karpenter-core/pkg/apis/settings" @@ -39,6 +39,7 @@ import ( "github.com/aws/karpenter-core/pkg/controllers/provisioning" "github.com/aws/karpenter-core/pkg/controllers/state" "github.com/aws/karpenter-core/pkg/controllers/state/informer" + "github.com/aws/karpenter-core/pkg/events" "github.com/aws/karpenter-core/pkg/operator/controller" "github.com/aws/karpenter-core/pkg/operator/scheme" "github.com/aws/karpenter-core/pkg/test" @@ -59,7 +60,6 @@ var cloudProvider *fake.CloudProvider var prov *provisioning.Provisioner var provisioningController controller.Controller var env *test.Environment -var recorder *test.EventRecorder var instanceTypeMap map[string]*cloudprovider.InstanceType func TestAPIs(t *testing.T) { @@ -72,12 +72,11 @@ var _ = BeforeSuite(func() { env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...)) ctx = settings.ToContext(ctx, test.Settings()) cloudProvider = fake.NewCloudProvider() - recorder = test.NewEventRecorder() fakeClock = clock.NewFakeClock(time.Now()) cluster = state.NewCluster(fakeClock, env.Client, cloudProvider) nodeController = informer.NewNodeController(env.Client, cluster) - prov = provisioning.NewProvisioner(ctx, env.Client, corev1.NewForConfigOrDie(env.Config), recorder, cloudProvider, cluster) - provisioningController = provisioning.NewController(env.Client, prov, recorder) + prov = provisioning.NewProvisioner(ctx, env.Client, corev1.NewForConfigOrDie(env.Config), events.NewRecorder(&record.FakeRecorder{}), cloudProvider, cluster) + provisioningController = provisioning.NewController(env.Client, prov, events.NewRecorder(&record.FakeRecorder{})) instanceTypes, _ := cloudProvider.GetInstanceTypes(context.Background(), nil) instanceTypeMap = map[string]*cloudprovider.InstanceType{} for _, it := range instanceTypes { @@ -96,32 +95,29 @@ var _ = AfterSuite(func() { var _ = AfterEach(func() { ExpectCleanedUp(ctx, env.Client) - recorder.Reset() cluster.Reset() }) var _ = Describe("Provisioning", func() { It("should provision nodes", func() { ExpectApplied(ctx, env.Client, test.Provisioner()) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) nodes := &v1.NodeList{} Expect(env.Client.List(ctx, nodes)).To(Succeed()) Expect(len(nodes.Items)).To(Equal(1)) - for _, pod := range pods { - ExpectScheduled(ctx, env.Client, pod) - } + ExpectScheduled(ctx, env.Client, pod) }) It("should ignore provisioners that are deleting", func() { provisioner := 
test.Provisioner() ExpectApplied(ctx, env.Client, provisioner) ExpectDeletionTimestampSet(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) nodes := &v1.NodeList{} Expect(env.Client.List(ctx, nodes)).To(Succeed()) Expect(len(nodes.Items)).To(Equal(0)) - for _, pod := range pods { - ExpectNotScheduled(ctx, env.Client, pod) - } + ExpectNotScheduled(ctx, env.Client, pod) }) It("should provision nodes for pods with supported node selectors", func() { provisioner := test.Provisioner() @@ -154,23 +150,27 @@ var _ = Describe("Provisioning", func() { test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{"foo": "bar"}}), } ExpectApplied(ctx, env.Client, provisioner) - for _, pod := range ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, schedulable...) { + ExpectProvisioned(ctx, env.Client, cluster, prov, schedulable...) + for _, pod := range schedulable { ExpectScheduled(ctx, env.Client, pod) } - for _, pod := range ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, unschedulable...) { + ExpectProvisioned(ctx, env.Client, cluster, prov, unschedulable...) + for _, pod := range unschedulable { ExpectNotScheduled(ctx, env.Client, pod) } }) It("should provision nodes for accelerators", func() { ExpectApplied(ctx, env.Client, test.Provisioner()) - for _, pod := range ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + pods := []*v1.Pod{ test.UnschedulablePod(test.PodOptions{ ResourceRequirements: v1.ResourceRequirements{Limits: v1.ResourceList{fake.ResourceGPUVendorA: resource.MustParse("1")}}, }), test.UnschedulablePod(test.PodOptions{ ResourceRequirements: v1.ResourceRequirements{Limits: v1.ResourceList{fake.ResourceGPUVendorB: resource.MustParse("1")}}, }), - ) { + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) + for _, pod := range pods { ExpectScheduled(ctx, env.Client, pod) } }) @@ -187,7 +187,10 @@ var _ = Describe("Provisioning", func() { }, }, })) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(), test.UnschedulablePod(), test.UnschedulablePod()) + pods := []*v1.Pod{ + test.UnschedulablePod(), test.UnschedulablePod(), test.UnschedulablePod(), + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) 
nodes := &v1.NodeList{} Expect(env.Client.List(ctx, nodes)).To(Succeed()) Expect(len(nodes.Items)).To(Equal(3)) @@ -224,15 +227,15 @@ var _ = Describe("Provisioning", func() { // Provision without a binding since some pods will already be bound // Should all schedule to the new node, ignoring the old node - ExpectProvisionedNoBinding(ctx, env.Client, provisioningController, prov, test.UnschedulablePod(), test.UnschedulablePod()) + bindings := ExpectProvisionedNoBinding(ctx, env.Client, prov, test.UnschedulablePod(), test.UnschedulablePod()) nodes := &v1.NodeList{} Expect(env.Client.List(ctx, nodes)).To(Succeed()) Expect(len(nodes.Items)).To(Equal(2)) // Scheduler should attempt to schedule all the pods to the new node - recorder.ForEachBinding(func(p *v1.Pod, n *v1.Node) { + for _, n := range bindings { Expect(n.Name).ToNot(Equal(node.Name)) - }) + } }) Context("Resource Limits", func() { It("should not schedule when limits are exceeded", func() { @@ -244,20 +247,22 @@ var _ = Describe("Provisioning", func() { }, }, })) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod())[0] + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should schedule if limits would be met", func() { ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{ Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")}, })) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ Requests: v1.ResourceList{ // requires a 2 CPU node, but leaves room for overhead v1.ResourceCPU: resource.MustParse("1.75"), }, - }}))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) // A 2 CPU node can be launched ExpectScheduled(ctx, env.Client, pod) }) @@ -285,10 +290,11 @@ var _ = Describe("Provisioning", func() { Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1.5"), }}} - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + pods := []*v1.Pod{ test.UnschedulablePod(opts), test.UnschedulablePod(opts), - ) + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) 
scheduledPodCount := 0 unscheduledPodCount := 0 pod0 := ExpectPodExists(ctx, env.Client, pods[0].Name, pods[0].Namespace) @@ -310,24 +316,26 @@ var _ = Describe("Provisioning", func() { ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{ Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")}, })) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2.1"), }, - }}))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should not schedule if limits would be exceeded (GPU)", func() { ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{ Limits: v1.ResourceList{v1.ResourcePods: resource.MustParse("1")}, })) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ Limits: v1.ResourceList{ fake.ResourceGPUVendorA: resource.MustParse("1"), }, - }}))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) // only available instance type has 2 GPUs which would exceed the limit ExpectNotScheduled(ctx, env.Client, pod) }) @@ -335,24 +343,26 @@ var _ = Describe("Provisioning", func() { ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{ Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")}, })) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ Requests: v1.ResourceList{ // requires a 2 CPU node, but leaves room for overhead v1.ResourceCPU: resource.MustParse("1.75"), }, - }}))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) // A 2 CPU node can be launched ExpectScheduled(ctx, env.Client, pod) // This pod requests over the existing limit (would add to 3.5 CPUs) so this should fail - pod = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod = test.UnschedulablePod( test.PodOptions{ResourceRequirements: v1.ResourceRequirements{ Requests: v1.ResourceList{ // requires a 2 CPU node, but leaves room for overhead v1.ResourceCPU: resource.MustParse("1.75"), }, - }}))[0] + }}) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) }) @@ -363,11 +373,12 @@ var _ = Describe("Provisioning", func() { ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}}, }}, )) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}}, }, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) allocatable := instanceTypeMap[node.Labels[v1.LabelInstanceTypeStable]].Capacity @@ -384,11 +395,12 @@ var _ = Describe("Provisioning", func() { ResourceRequirements: v1.ResourceRequirements{Requests: 
v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}}, }}, )) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}}, }, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) allocatable := instanceTypeMap[node.Labels[v1.LabelInstanceTypeStable]].Capacity @@ -401,7 +413,8 @@ var _ = Describe("Provisioning", func() { ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10000"), v1.ResourceMemory: resource.MustParse("10000Gi")}}, }}, )) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{}))[0] + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should not schedule if resource requests are not defined and limits (requests) are too large", func() { @@ -413,7 +426,8 @@ var _ = Describe("Provisioning", func() { }, }}, )) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{}))[0] + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should schedule based on the max resource requests of containers and initContainers", func() { @@ -430,7 +444,8 @@ var _ = Describe("Provisioning", func() { }, }}, )) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{}))[0] + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) allocatable := instanceTypeMap[node.Labels[v1.LabelInstanceTypeStable]].Capacity Expect(*allocatable.Cpu()).To(Equal(resource.MustParse("4"))) @@ -450,7 +465,8 @@ var _ = Describe("Provisioning", func() { }, }}, )) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{}))[0] + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should not schedule if initContainer resources are too large", func() { @@ -462,14 +478,16 @@ var _ = Describe("Provisioning", func() { }, }}, )) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{}))[0] + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should be able to schedule pods if resource requests and limits are not defined", func() { ExpectApplied(ctx, env.Client, test.Provisioner(), test.DaemonSet( test.DaemonSetOptions{PodOptions: test.PodOptions{}}, )) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{}))[0] + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectScheduled(ctx, env.Client, pod) }) It("should ignore daemonsets without matching tolerations", func() { @@ -480,12 +498,13 @@ var _ = 
Describe("Provisioning", func() { ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}}, }}, )) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ Tolerations: []v1.Toleration{{Operator: v1.TolerationOperator(v1.NodeSelectorOpExists)}}, ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}}, }, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) allocatable := instanceTypeMap[node.Labels[v1.LabelInstanceTypeStable]].Capacity Expect(*allocatable.Cpu()).To(Equal(resource.MustParse("2"))) @@ -498,11 +517,12 @@ var _ = Describe("Provisioning", func() { ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}}, }}, )) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}}, }, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) allocatable := instanceTypeMap[node.Labels[v1.LabelInstanceTypeStable]].Capacity Expect(*allocatable.Cpu()).To(Equal(resource.MustParse("2"))) @@ -515,12 +535,13 @@ var _ = Describe("Provisioning", func() { ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}}, }}, )) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ NodeRequirements: []v1.NodeSelectorRequirement{{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}}}, ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}}, }, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) allocatable := instanceTypeMap[node.Labels[v1.LabelInstanceTypeStable]].Capacity Expect(*allocatable.Cpu()).To(Equal(resource.MustParse("4"))) @@ -533,10 +554,10 @@ var _ = Describe("Provisioning", func() { Annotations: map[string]string{v1alpha5.DoNotConsolidateNodeAnnotationKey: "true"}, }) ExpectApplied(ctx, env.Client, provisioner) - for _, pod := range ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) { - node := ExpectScheduled(ctx, env.Client, pod) - Expect(node.Annotations).To(HaveKeyWithValue(v1alpha5.DoNotConsolidateNodeAnnotationKey, "true")) - } + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) + Expect(node.Annotations).To(HaveKeyWithValue(v1alpha5.DoNotConsolidateNodeAnnotationKey, "true")) }) }) Context("Labels", func() { @@ -553,27 +574,28 @@ var _ = Describe("Provisioning", func() { }, }) ExpectApplied(ctx, env.Client, provisioner) - for _, pod := range 
ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) { - node := ExpectScheduled(ctx, env.Client, pod) - Expect(node.Labels).To(HaveKeyWithValue(v1alpha5.ProvisionerNameLabelKey, provisioner.Name)) - Expect(node.Labels).To(HaveKeyWithValue("test-key-1", "test-value-1")) - Expect(node.Labels).To(HaveKeyWithValue("test-key-2", "test-value-2")) - Expect(node.Labels).To(And(HaveKey("test-key-3"), Not(HaveValue(Equal("test-value-3"))))) - Expect(node.Labels).To(And(HaveKey("test-key-4"), Not(HaveValue(Equal("test-value-4"))))) - Expect(node.Labels).To(And(HaveKey("test-key-5"), Not(HaveValue(Equal("test-value-5"))))) - Expect(node.Labels).To(HaveKey("test-key-6")) - Expect(node.Labels).ToNot(HaveKey("test-key-7")) - } + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) + node := ExpectScheduled(ctx, env.Client, pod) + Expect(node.Labels).To(HaveKeyWithValue(v1alpha5.ProvisionerNameLabelKey, provisioner.Name)) + Expect(node.Labels).To(HaveKeyWithValue("test-key-1", "test-value-1")) + Expect(node.Labels).To(HaveKeyWithValue("test-key-2", "test-value-2")) + Expect(node.Labels).To(And(HaveKey("test-key-3"), Not(HaveValue(Equal("test-value-3"))))) + Expect(node.Labels).To(And(HaveKey("test-key-4"), Not(HaveValue(Equal("test-value-4"))))) + Expect(node.Labels).To(And(HaveKey("test-key-5"), Not(HaveValue(Equal("test-value-5"))))) + Expect(node.Labels).To(HaveKey("test-key-6")) + Expect(node.Labels).ToNot(HaveKey("test-key-7")) }) It("should label nodes with labels in the LabelDomainExceptions list", func() { for domain := range v1alpha5.LabelDomainExceptions { provisioner := test.Provisioner(test.ProvisionerOptions{Labels: map[string]string{domain + "/test": "test-value"}}) ExpectApplied(ctx, env.Client, provisioner) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ NodeRequirements: []v1.NodeSelectorRequirement{{Key: domain + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}}, }, - ))[0] + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue(domain+"/test", "test-value")) } @@ -584,7 +606,7 @@ var _ = Describe("Provisioning", func() { It("should schedule pods that tolerate taints", func() { provisioner := test.Provisioner(test.ProvisionerOptions{Taints: []v1.Taint{{Key: "nvidia.com/gpu", Value: "true", Effect: v1.TaintEffectNoSchedule}}}) ExpectApplied(ctx, env.Client, provisioner) - for _, pod := range ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, + pods := []*v1.Pod{ test.UnschedulablePod( test.PodOptions{Tolerations: []v1.Toleration{ { @@ -615,7 +637,9 @@ var _ = Describe("Provisioning", func() { Operator: v1.TolerationOpExists, }, }}), - ) { + } + ExpectProvisioned(ctx, env.Client, cluster, prov, pods...) 
+ for _, pod := range pods { ExpectScheduled(ctx, env.Client, pod) } }) @@ -624,7 +648,8 @@ var _ = Describe("Provisioning", func() { It("should create a machine request with expected requirements", func() { provisioner := test.Provisioner() ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) Expect(cloudProvider.CreateCalls).To(HaveLen(1)) ExpectMachineRequirements(cloudProvider.CreateCalls[0], @@ -639,9 +664,7 @@ var _ = Describe("Provisioning", func() { Values: []string{provisioner.Name}, }, ) - for _, pod := range pods { - ExpectScheduled(ctx, env.Client, pod) - } + ExpectScheduled(ctx, env.Client, pod) }) It("should create a machine request with additional expected requirements", func() { provisioner := test.Provisioner(test.ProvisionerOptions{ @@ -659,7 +682,8 @@ var _ = Describe("Provisioning", func() { }, }) ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) Expect(cloudProvider.CreateCalls).To(HaveLen(1)) ExpectMachineRequirements(cloudProvider.CreateCalls[0], @@ -684,9 +708,7 @@ var _ = Describe("Provisioning", func() { Values: []string{"value"}, }, ) - for _, pod := range pods { - ExpectScheduled(ctx, env.Client, pod) - } + ExpectScheduled(ctx, env.Client, pod) }) It("should create a machine request restricting instance types on architecture", func() { provisioner := test.Provisioner(test.ProvisionerOptions{ @@ -699,7 +721,8 @@ var _ = Describe("Provisioning", func() { }, }) ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) Expect(cloudProvider.CreateCalls).To(HaveLen(1)) @@ -716,9 +739,7 @@ var _ = Describe("Provisioning", func() { Values: []string{"arm-instance-type"}, }, ) - for _, pod := range pods { - ExpectScheduled(ctx, env.Client, pod) - } + ExpectScheduled(ctx, env.Client, pod) }) It("should create a machine request restricting instance types on operating system", func() { provisioner := test.Provisioner(test.ProvisionerOptions{ @@ -731,7 +752,8 @@ var _ = Describe("Provisioning", func() { }, }) ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) Expect(cloudProvider.CreateCalls).To(HaveLen(1)) @@ -748,14 +770,12 @@ var _ = Describe("Provisioning", func() { Values: []string{"arm-instance-type"}, }, ) - for _, pod := range pods { - ExpectScheduled(ctx, env.Client, pod) - } + ExpectScheduled(ctx, env.Client, pod) }) It("should create a machine request restricting instance types based on pod resource requests", func() { provisioner := test.Provisioner() ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{ + pod := test.UnschedulablePod(test.PodOptions{ ResourceRequirements: v1.ResourceRequirements{ Requests: v1.ResourceList{ fake.ResourceGPUVendorA: 
resource.MustParse("1"), @@ -764,7 +784,8 @@ var _ = Describe("Provisioning", func() { fake.ResourceGPUVendorA: resource.MustParse("1"), }, }, - })) + }) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) Expect(cloudProvider.CreateCalls).To(HaveLen(1)) @@ -776,14 +797,13 @@ var _ = Describe("Provisioning", func() { Values: []string{"gpu-vendor-instance-type"}, }, ) - for _, pod := range pods { - ExpectScheduled(ctx, env.Client, pod) - } + ExpectScheduled(ctx, env.Client, pod) }) It("should create a machine request with the correct owner reference", func() { provisioner := test.Provisioner(test.ProvisionerOptions{}) ExpectApplied(ctx, env.Client, provisioner) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) Expect(cloudProvider.CreateCalls).To(HaveLen(1)) Expect(cloudProvider.CreateCalls[0].OwnerReferences).To(ContainElement( @@ -794,9 +814,7 @@ var _ = Describe("Provisioning", func() { UID: provisioner.UID, }, )) - for _, pod := range pods { - ExpectScheduled(ctx, env.Client, pod) - } + ExpectScheduled(ctx, env.Client, pod) }) It("should create a machine request propagating the provider reference", func() { ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{ @@ -806,7 +824,8 @@ var _ = Describe("Provisioning", func() { Name: "default", }, })) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) Expect(cloudProvider.CreateCalls).To(HaveLen(1)) Expect(cloudProvider.CreateCalls[0].Spec.MachineTemplateRef).To(Equal( @@ -816,9 +835,7 @@ var _ = Describe("Provisioning", func() { Name: "default", }, )) - for _, pod := range pods { - ExpectScheduled(ctx, env.Client, pod) - } + ExpectScheduled(ctx, env.Client, pod) }) It("should create a machine request with the karpenter.sh/compatibility/provider annotation", func() { ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{ @@ -827,7 +844,8 @@ var _ = Describe("Provisioning", func() { "providerField2": "value", }, })) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod()) + pod := test.UnschedulablePod() + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) Expect(cloudProvider.CreateCalls).To(HaveLen(1)) Expect(cloudProvider.CreateCalls[0].Annotations).To(HaveKey(v1alpha5.ProviderCompatabilityAnnotationKey)) @@ -837,10 +855,7 @@ var _ = Describe("Provisioning", func() { Expect(json.Unmarshal([]byte(cloudProvider.CreateCalls[0].Annotations[v1alpha5.ProviderCompatabilityAnnotationKey]), &provider)).To(Succeed()) Expect(provider).To(HaveKeyWithValue("providerField1", "value")) Expect(provider).To(HaveKeyWithValue("providerField2", "value")) - - for _, pod := range pods { - ExpectScheduled(ctx, env.Client, pod) - } + ExpectScheduled(ctx, env.Client, pod) }) It("should create a machine with resource requests", func() { ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{ @@ -849,7 +864,7 @@ var _ = Describe("Provisioning", func() { "providerField2": "value", }, })) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ ResourceRequirements: v1.ResourceRequirements{ Requests: v1.ResourceList{ @@ -861,8 
+876,8 @@ var _ = Describe("Provisioning", func() { fake.ResourceGPUVendorA: resource.MustParse("1"), }, }, - }), - ) + }) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) Expect(cloudProvider.CreateCalls).To(HaveLen(1)) Expect(cloudProvider.CreateCalls[0].Spec.Resources.Requests).To(HaveLen(4)) ExpectMachineRequests(cloudProvider.CreateCalls[0], v1.ResourceList{ @@ -871,9 +886,7 @@ var _ = Describe("Provisioning", func() { fake.ResourceGPUVendorA: resource.MustParse("1"), v1.ResourcePods: resource.MustParse("1"), }) - for _, pod := range pods { - ExpectScheduled(ctx, env.Client, pod) - } + ExpectScheduled(ctx, env.Client, pod) }) It("should create a machine with resource requests with daemon overhead", func() { ExpectApplied(ctx, env.Client, test.Provisioner(), test.DaemonSet( @@ -881,20 +894,19 @@ var _ = Describe("Provisioning", func() { ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Mi")}}, }}, )) - pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod( + pod := test.UnschedulablePod( test.PodOptions{ ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Mi")}}, }, - )) + ) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) Expect(cloudProvider.CreateCalls).To(HaveLen(1)) ExpectMachineRequests(cloudProvider.CreateCalls[0], v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), v1.ResourceMemory: resource.MustParse("2Mi"), v1.ResourcePods: resource.MustParse("2"), }) - for _, pod := range pods { - ExpectScheduled(ctx, env.Client, pod) - } + ExpectScheduled(ctx, env.Client, pod) }) }) }) @@ -906,26 +918,30 @@ var _ = Describe("Volume Topology Requirements", func() { }) It("should not schedule if invalid pvc", func() { ExpectApplied(ctx, env.Client, test.Provisioner()) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{ + pod := test.UnschedulablePod(test.PodOptions{ PersistentVolumeClaims: []string{"invalid"}, - }))[0] + }) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should schedule with an empty storage class", func() { storageClass := "" persistentVolumeClaim := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{StorageClassName: &storageClass}) ExpectApplied(ctx, env.Client, test.Provisioner(), persistentVolumeClaim) - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{ + pod := test.UnschedulablePod(test.PodOptions{ PersistentVolumeClaims: []string{persistentVolumeClaim.Name}, - }))[0] + }) + ExpectProvisioned(ctx, env.Client, cluster, prov, pod) ExpectScheduled(ctx, env.Client, pod) }) It("should schedule valid pods when a pod with an invalid pvc is encountered (pvc)", func() { ExpectApplied(ctx, env.Client, test.Provisioner()) - invalidPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{ + invalidPod := test.UnschedulablePod(test.PodOptions{ PersistentVolumeClaims: []string{"invalid"}, - }))[0] - pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{}))[0] + }) + ExpectProvisioned(ctx, env.Client, cluster, prov, invalidPod) + pod 
:= test.UnschedulablePod()
+ ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
ExpectNotScheduled(ctx, env.Client, invalidPod)
ExpectScheduled(ctx, env.Client, pod)
})
@@ -933,10 +949,12 @@ var _ = Describe("Volume Topology Requirements", func() {
invalidStorageClass := "invalid-storage-class"
persistentVolumeClaim := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{StorageClassName: &invalidStorageClass})
ExpectApplied(ctx, env.Client, test.Provisioner(), persistentVolumeClaim)
- invalidPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{
+ invalidPod := test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{persistentVolumeClaim.Name},
- }))[0]
- pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{}))[0]
+ })
+ ExpectProvisioned(ctx, env.Client, cluster, prov, invalidPod)
+ pod := test.UnschedulablePod()
+ ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
ExpectNotScheduled(ctx, env.Client, invalidPod)
ExpectScheduled(ctx, env.Client, pod)
})
@@ -944,43 +962,48 @@ var _ = Describe("Volume Topology Requirements", func() {
invalidVolumeName := "invalid-volume-name"
persistentVolumeClaim := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{VolumeName: invalidVolumeName})
ExpectApplied(ctx, env.Client, test.Provisioner(), persistentVolumeClaim)
- invalidPod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{
+ invalidPod := test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{persistentVolumeClaim.Name},
- }))[0]
- pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{}))[0]
+ })
+ ExpectProvisioned(ctx, env.Client, cluster, prov, invalidPod)
+ pod := test.UnschedulablePod()
+ ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
ExpectNotScheduled(ctx, env.Client, invalidPod)
ExpectScheduled(ctx, env.Client, pod)
})
It("should schedule to storage class zones if volume does not exist", func() {
persistentVolumeClaim := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{StorageClassName: &storageClass.Name})
ExpectApplied(ctx, env.Client, test.Provisioner(), storageClass, persistentVolumeClaim)
- pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{
+ pod := test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{persistentVolumeClaim.Name},
NodeRequirements: []v1.NodeSelectorRequirement{{
Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-3"},
}},
- }))[0]
+ })
+ ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3"))
})
It("should not schedule if storage class zones are incompatible", func() {
persistentVolumeClaim := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{StorageClassName: &storageClass.Name})
ExpectApplied(ctx, env.Client, test.Provisioner(), storageClass, persistentVolumeClaim)
- pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{
+ pod := test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{persistentVolumeClaim.Name},
NodeRequirements: []v1.NodeSelectorRequirement{{
Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"},
}},
- }))[0]
+ })
+ ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule to volume zones if volume already bound", func() {
persistentVolume := test.PersistentVolume(test.PersistentVolumeOptions{Zones: []string{"test-zone-3"}})
persistentVolumeClaim := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{VolumeName: persistentVolume.Name, StorageClassName: &storageClass.Name})
ExpectApplied(ctx, env.Client, test.Provisioner(), storageClass, persistentVolumeClaim, persistentVolume)
- pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{
+ pod := test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{persistentVolumeClaim.Name},
- }))[0]
+ })
+ ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3"))
})
@@ -988,12 +1011,13 @@ var _ = Describe("Volume Topology Requirements", func() {
persistentVolume := test.PersistentVolume(test.PersistentVolumeOptions{Zones: []string{"test-zone-3"}})
persistentVolumeClaim := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{VolumeName: persistentVolume.Name, StorageClassName: &storageClass.Name})
ExpectApplied(ctx, env.Client, test.Provisioner(), storageClass, persistentVolumeClaim, persistentVolume)
- pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(test.PodOptions{
+ pod := test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{persistentVolumeClaim.Name},
NodeRequirements: []v1.NodeSelectorRequirement{{
Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"},
}},
- }))[0]
+ })
+ ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should not relax an added volume topology zone node-selector away", func() {
@@ -1025,7 +1049,7 @@ var _ = Describe("Volume Topology Requirements", func() {
},
},
})
- pod = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod)[0]
+ ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3"))
})
@@ -1042,7 +1066,7 @@ var _ = Describe("Preferential Fallback", func() {
}}}}
// Don't relax
ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{Requirements: []v1.NodeSelectorRequirement{{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}}}}))
- pod = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod)[0]
+ ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should relax multiple terms", func() {
@@ -1063,7 +1087,7 @@ var _ = Describe("Preferential Fallback", func() {
}}}}
// Success
ExpectApplied(ctx, env.Client, test.Provisioner())
- pod = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod)[0]
+ ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-1"))
})
@@ -1085,7 +1109,7 @@ var _ = Describe("Preferential Fallback", func() {
}}}
// Success
ExpectApplied(ctx, env.Client, test.Provisioner())
- pod = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod)[0]
+ ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
ExpectScheduled(ctx, env.Client, pod)
})
It("should relax to use lighter weights", func() {
@@ -1109,7 +1133,7 @@ var _ = Describe("Preferential Fallback", func() {
}}}
// Success
ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{Requirements: []v1.NodeSelectorRequirement{{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2"}}}}))
- pod = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod)[0]
+ ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-2"))
})
@@ -1129,7 +1153,7 @@ var _ = Describe("Preferential Fallback", func() {
}}}
// Success
ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{Taints: []v1.Taint{{Key: "foo", Value: "bar", Effect: v1.TaintEffectPreferNoSchedule}}}))
- pod = ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, pod)[0]
+ ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Spec.Taints).To(ContainElement(v1.Taint{Key: "foo", Value: "bar", Effect: v1.TaintEffectPreferNoSchedule}))
})
@@ -1140,25 +1164,24 @@ var _ = Describe("Multiple Provisioners", func() {
It("should schedule to an explicitly selected provisioner", func() {
provisioner := test.Provisioner()
ExpectApplied(ctx, env.Client, provisioner, test.Provisioner())
- pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov,
- test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1alpha5.ProvisionerNameLabelKey: provisioner.Name}}),
- )[0]
+ pod := test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1alpha5.ProvisionerNameLabelKey: provisioner.Name}})
+ ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels[v1alpha5.ProvisionerNameLabelKey]).To(Equal(provisioner.Name))
})
It("should schedule to a provisioner by labels", func() {
provisioner := test.Provisioner(test.ProvisionerOptions{Labels: map[string]string{"foo": "bar"}})
ExpectApplied(ctx, env.Client, provisioner, test.Provisioner())
- pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov,
- test.UnschedulablePod(test.PodOptions{NodeSelector: provisioner.Spec.Labels}),
- )[0]
+ pod := test.UnschedulablePod(test.PodOptions{NodeSelector: provisioner.Spec.Labels})
+ ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels[v1alpha5.ProvisionerNameLabelKey]).To(Equal(provisioner.Name))
})
It("should not match provisioner with PreferNoSchedule taint when other provisioner match", func() {
provisioner := test.Provisioner(test.ProvisionerOptions{Taints: []v1.Taint{{Key: "foo", Value: "bar", Effect: v1.TaintEffectPreferNoSchedule}}})
ExpectApplied(ctx, env.Client, provisioner, test.Provisioner())
- pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod())[0]
+ pod := test.UnschedulablePod()
+ ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels[v1alpha5.ProvisionerNameLabelKey]).ToNot(Equal(provisioner.Name))
})
@@ -1170,7 +1193,10 @@ var _ = Describe("Multiple Provisioners", func() {
test.Provisioner(test.ProvisionerOptions{Weight: ptr.Int32(100)}),
}
ExpectApplied(ctx, env.Client, provisioners...)
- pods := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov, test.UnschedulablePod(), test.UnschedulablePod(), test.UnschedulablePod())
+ pods := []*v1.Pod{
+ test.UnschedulablePod(), test.UnschedulablePod(), test.UnschedulablePod(),
+ }
+ ExpectProvisioned(ctx, env.Client, cluster, prov, pods...)
for _, pod := range pods {
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels[v1alpha5.ProvisionerNameLabelKey]).To(Equal(provisioners[2].GetName()))
@@ -1184,9 +1210,8 @@ var _ = Describe("Multiple Provisioners", func() {
test.Provisioner(test.ProvisionerOptions{Weight: ptr.Int32(100)}),
}
ExpectApplied(ctx, env.Client, provisioners...)
- pod := ExpectProvisioned(ctx, env.Client, cluster, recorder, provisioningController, prov,
- test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1alpha5.ProvisionerNameLabelKey: targetedProvisioner.Name}}),
- )[0]
+ pod := test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1alpha5.ProvisionerNameLabelKey: targetedProvisioner.Name}})
+ ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels[v1alpha5.ProvisionerNameLabelKey]).To(Equal(targetedProvisioner.Name))
})
diff --git a/pkg/controllers/state/cluster.go b/pkg/controllers/state/cluster.go
index c0fe88c695..1c05a9dd12 100644
--- a/pkg/controllers/state/cluster.go
+++ b/pkg/controllers/state/cluster.go
@@ -462,7 +462,7 @@ func (c *Cluster) updateNodeUsageFromPod(ctx context.Context, pod *v1.Pod) error
n, ok := c.nodes[c.nameToProviderID[pod.Spec.NodeName]]
if !ok {
// the node must exist for us to update the resource requests on the node
- return errors.NewNotFound(schema.GroupResource{Resource: "Node"}, pod.Spec.NodeName)
+ return errors.NewNotFound(schema.GroupResource{Resource: "Machine"}, pod.Spec.NodeName)
}
c.cleanupOldBindings(pod)
n.updateForPod(ctx, pod)
diff --git a/pkg/controllers/termination/suite_test.go b/pkg/controllers/termination/suite_test.go
index 0e050915f1..750b312785 100644
--- a/pkg/controllers/termination/suite_test.go
+++ b/pkg/controllers/termination/suite_test.go
@@ -21,6 +21,7 @@ import (
"testing"
"time"
+ "k8s.io/client-go/tools/record"
clock "k8s.io/utils/clock/testing"
"github.com/samber/lo"
@@ -31,6 +32,7 @@ import (
"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
"github.com/aws/karpenter-core/pkg/controllers/machine/terminator"
"github.com/aws/karpenter-core/pkg/controllers/termination"
+ "github.com/aws/karpenter-core/pkg/events"
"github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/operator/scheme"
"github.com/aws/karpenter-core/pkg/test"
@@ -64,9 +66,8 @@ var _ = BeforeSuite(func() {
env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...))
cloudProvider := fake.NewCloudProvider()
- eventRecorder := test.NewEventRecorder()
- evictionQueue = terminator.NewEvictionQueue(ctx, env.KubernetesInterface.CoreV1(), eventRecorder)
- terminationController = termination.NewController(env.Client, terminator.NewTerminator(fakeClock, env.Client, cloudProvider, evictionQueue), eventRecorder)
+ evictionQueue = terminator.NewEvictionQueue(ctx, env.KubernetesInterface.CoreV1(), events.NewRecorder(&record.FakeRecorder{}))
+ terminationController = termination.NewController(env.Client, terminator.NewTerminator(fakeClock, env.Client, cloudProvider, evictionQueue), events.NewRecorder(&record.FakeRecorder{}))
})
var _ = AfterSuite(func() {
diff --git a/pkg/events/suite_test.go b/pkg/events/suite_test.go
index 1f4ed96330..77535f92af 100644
--- a/pkg/events/suite_test.go
+++ b/pkg/events/suite_test.go
@@ -16,12 +16,14 @@ package events_test
import (
"fmt"
+ "sync"
"testing"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/util/flowcontrol"
@@ -29,8 +31,39 @@ import (
"github.com/aws/karpenter-core/pkg/test"
)
-var internalRecorder *test.InternalRecorder
var eventRecorder events.Recorder
+var internalRecorder *InternalRecorder
+
+type InternalRecorder struct {
+ mu sync.RWMutex
+ calls map[string]int
+}
+
+func NewInternalRecorder() *InternalRecorder {
+ return &InternalRecorder{
+ calls: map[string]int{},
+ }
+}
+
+func (i *InternalRecorder) Event(_ runtime.Object, _, reason, _ string) {
+ i.mu.Lock()
+ defer i.mu.Unlock()
+ i.calls[reason]++
+}
+
+func (i *InternalRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, _ ...interface{}) {
+ i.Event(object, eventtype, reason, messageFmt)
+}
+
+func (i *InternalRecorder) AnnotatedEventf(object runtime.Object, _ map[string]string, eventtype, reason, messageFmt string, _ ...interface{}) {
+ i.Event(object, eventtype, reason, messageFmt)
+}
+
+func (i *InternalRecorder) Calls(reason string) int {
+ i.mu.RLock()
+ defer i.mu.RUnlock()
+ return i.calls[reason]
+}
func TestRecorder(t *testing.T) {
RegisterFailHandler(Fail)
@@ -38,7 +71,7 @@ func TestRecorder(t *testing.T) {
}
var _ = BeforeEach(func() {
- internalRecorder = test.NewInternalRecorder()
+ internalRecorder = NewInternalRecorder()
eventRecorder = events.NewRecorder(internalRecorder)
events.PodNominationRateLimiter = flowcontrol.NewTokenBucketRateLimiter(5, 10)
})
diff --git a/pkg/test/eventrecorder.go b/pkg/test/eventrecorder.go
deleted file mode 100644
index af4ed171ec..0000000000
--- a/pkg/test/eventrecorder.go
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package test
-
-import (
- "regexp"
- "sync"
-
- v1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/client-go/tools/record"
-
- "github.com/aws/karpenter-core/pkg/events"
-)
-
-// Binding is a potential binding that was reported through event recording.
-type Binding struct {
- Pod *v1.Pod
- Node *v1.Node
-}
-
-var _ events.Recorder = (*EventRecorder)(nil)
-
-// EventRecorder is a mock event recorder that is used to facilitate testing.
-type EventRecorder struct {
- mu sync.RWMutex
- bindings []Binding
- calls map[string]int
- events []events.Event
-}
-
-func NewEventRecorder() *EventRecorder {
- return &EventRecorder{
- calls: map[string]int{},
- }
-}
-
-func (e *EventRecorder) Publish(evt events.Event) {
- e.mu.Lock()
- defer e.mu.Unlock()
- e.events = append(e.events, evt)
-
- fakeNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "fake"}}
- fakePod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "fake"}}
- switch evt.Reason {
- case events.NominatePod(fakePod, fakeNode).Reason:
- var nodeName string
- r := regexp.MustCompile(`Pod should schedule on (?P<NodeName>.*)`)
- matches := r.FindStringSubmatch(evt.Message)
- if len(matches) == 0 {
- return
- }
- for i, name := range r.SubexpNames() {
- if name == "NodeName" {
- nodeName = matches[i]
- break
- }
- }
-
- pod := evt.InvolvedObject.(*v1.Pod)
- node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} // This is all we need for the binding
- e.bindings = append(e.bindings, Binding{pod, node})
- }
- e.calls[evt.Reason]++
-}
-
-func (e *EventRecorder) Calls(reason string) int {
- e.mu.RLock()
- defer e.mu.RUnlock()
- return e.calls[reason]
-}
-
-func (e *EventRecorder) Reset() {
- e.ResetBindings()
-}
-
-func (e *EventRecorder) ResetBindings() {
- e.mu.Lock()
- defer e.mu.Unlock()
- e.bindings = nil
-}
-
-func (e *EventRecorder) ForEachEvent(f func(evt events.Event)) {
- e.mu.RLock()
- defer e.mu.RUnlock()
- for _, e := range e.events {
- f(e)
- }
-}
-
-func (e *EventRecorder) ForEachBinding(f func(pod *v1.Pod, node *v1.Node)) {
- e.mu.RLock()
- defer e.mu.RUnlock()
- for _, b := range e.bindings {
- f(b.Pod, b.Node)
- }
-}
-
-var _ record.EventRecorder = (*InternalRecorder)(nil)
-
-type InternalRecorder struct {
- mu sync.RWMutex
- calls map[string]int
-}
-
-func NewInternalRecorder() *InternalRecorder {
- return &InternalRecorder{
- calls: map[string]int{},
- }
-}
-
-func (i *InternalRecorder) Event(_ runtime.Object, _, reason, _ string) {
- i.mu.Lock()
- defer i.mu.Unlock()
- i.calls[reason]++
-}
-
-func (i *InternalRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, _ ...interface{}) {
- i.Event(object, eventtype, reason, messageFmt)
-}
-
-func (i *InternalRecorder) AnnotatedEventf(object runtime.Object, _ map[string]string, eventtype, reason, messageFmt string, _ ...interface{}) {
- i.Event(object, eventtype, reason, messageFmt)
-}
-
-func (i *InternalRecorder) Calls(reason string) int {
- i.mu.RLock()
- defer i.mu.RUnlock()
- return i.calls[reason]
-}
diff --git a/pkg/test/expectations/expectations.go b/pkg/test/expectations/expectations.go
index deadbf380c..4a22f4d8b7 100644
--- a/pkg/test/expectations/expectations.go
+++ b/pkg/test/expectations/expectations.go
@@ -18,7 +18,6 @@ package expectations
import (
"context"
"fmt"
- "math/rand"
"reflect"
"sync"
"time"
@@ -36,6 +35,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/sets"
"knative.dev/pkg/apis"
"knative.dev/pkg/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -47,8 +47,7 @@ import (
"github.com/aws/karpenter-core/pkg/controllers/provisioning"
"github.com/aws/karpenter-core/pkg/controllers/provisioning/scheduling"
"github.com/aws/karpenter-core/pkg/controllers/state"
- corecontroller "github.com/aws/karpenter-core/pkg/operator/controller"
- "github.com/aws/karpenter-core/pkg/test"
+ "github.com/aws/karpenter-core/pkg/operator/injection"
)
const (
@@ -213,52 +212,48 @@ func ExpectFinalizersRemoved(ctx context.Context, c client.Client, objectLists .
}
}
-func ExpectProvisioned(ctx context.Context, c client.Client, cluster *state.Cluster, recorder *test.EventRecorder, controller corecontroller.Controller,
- provisioner *provisioning.Provisioner, pods ...*v1.Pod) (result []*v1.Pod) {
-
- ExpectProvisionedNoBindingWithOffset(1, ctx, c, controller, provisioner, pods...)
-
- recorder.ForEachBinding(func(pod *v1.Pod, node *v1.Node) {
- ExpectManualBindingWithOffset(1, ctx, c, pod, node)
- })
- // reset bindings, so we don't try to bind these same pods again if a new provisioning is performed in the same test
- recorder.ResetBindings()
-
- // Update objects after reconciling
- for _, pod := range pods {
- result = append(result, ExpectPodExistsWithOffset(1, ctx, c, pod.GetName(), pod.GetNamespace()))
- Expect(cluster.UpdatePod(ctx, result[len(result)-1])).WithOffset(1).To(Succeed()) // track pod bindings
+func ExpectProvisioned(ctx context.Context, c client.Client, cluster *state.Cluster, provisioner *provisioning.Provisioner, pods ...*v1.Pod) map[*v1.Pod]*v1.Node {
+ bindings := ExpectProvisionedNoBindingWithOffset(1, ctx, c, provisioner, pods...)
+ podNames := sets.NewString(lo.Map(pods, func(p *v1.Pod, _ int) string { return p.Name })...)
+ for pod, node := range bindings {
+ // Only bind the pods that are passed through
+ if podNames.Has(pod.Name) {
+ ExpectManualBindingWithOffset(1, ctx, c, pod, node)
+ ExpectWithOffset(1, cluster.UpdatePod(ctx, pod)).To(Succeed()) // track pod bindings
+ }
}
- return
+ return bindings
}
-func ExpectProvisionedNoBinding(ctx context.Context, c client.Client, controller corecontroller.Controller, provisioner *provisioning.Provisioner, pods ...*v1.Pod) (result []*v1.Pod) {
- return ExpectProvisionedNoBindingWithOffset(1, ctx, c, controller, provisioner, pods...)
+func ExpectProvisionedNoBinding(ctx context.Context, c client.Client, provisioner *provisioning.Provisioner, pods ...*v1.Pod) map[*v1.Pod]*v1.Node {
+ return ExpectProvisionedNoBindingWithOffset(1, ctx, c, provisioner, pods...)
}
-func ExpectProvisionedNoBindingWithOffset(offset int, ctx context.Context, c client.Client, controller corecontroller.Controller, provisioner *provisioning.Provisioner, pods ...*v1.Pod) (result []*v1.Pod) {
+func ExpectProvisionedNoBindingWithOffset(offset int, ctx context.Context, c client.Client, provisioner *provisioning.Provisioner, pods ...*v1.Pod) map[*v1.Pod]*v1.Node {
// Persist objects
for _, pod := range pods {
ExpectAppliedWithOffset(offset+1, ctx, c, pod)
}
-
- // shuffle the pods to try to detect any issues where we rely on pod order within a batch, we shuffle a copy of
- // the slice so we can return the provisioned pods in the same order that the test supplied them for consistency
- unorderedPods := append([]*v1.Pod{}, pods...)
- r := rand.New(rand.NewSource(GinkgoRandomSeed())) //nolint
- r.Shuffle(len(unorderedPods), func(i, j int) { unorderedPods[i], unorderedPods[j] = unorderedPods[j], unorderedPods[i] })
- for _, pod := range unorderedPods {
- _, _ = controller.Reconcile(ctx, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(pod)})
+ // TODO: Check the error on the provisioner scheduling round
+ machines, nodes, _ := provisioner.Schedule(ctx)
+ bindings := map[*v1.Pod]*v1.Node{}
+ for _, m := range machines {
+ ctx = injection.WithNamespacedName(ctx, types.NamespacedName{Name: m.Labels[v1alpha5.ProvisionerNameLabelKey]})
+ // TODO: Check the error on the provisioner launch
+ name, err := provisioner.Launch(ctx, m)
+ if err != nil {
+ return bindings
+ }
+ for _, pod := range m.Pods {
+ bindings[pod] = ExpectNodeExistsWithOffset(offset+1, ctx, c, name)
+ }
}
-
- // TODO: Check the error on the provisioner reconcile
- _, _ = provisioner.Reconcile(ctx, reconcile.Request{})
-
- // Update objects after reconciling
- for _, pod := range pods {
- result = append(result, ExpectPodExistsWithOffset(offset+1, ctx, c, pod.GetName(), pod.GetNamespace()))
+ for _, node := range nodes {
+ for _, pod := range node.Pods {
+ bindings[pod] = node.Node.Node
+ }
}
- return
+ return bindings
}
func ExpectReconcileSucceeded(ctx context.Context, reconciler reconcile.Reconciler, key client.ObjectKey) reconcile.Result {
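Editor's note: the following is a minimal sketch (not part of the diff) of how a suite test calls the refactored helper after this change. It assumes the usual suite globals from suite_test.go (ctx, env, cluster, and the prov *provisioning.Provisioner); the names and assertions are illustrative only.

// Illustrative usage of the new ExpectProvisioned signature; assumes the suite
// globals (ctx, env, cluster, prov) set up in BeforeSuite, as in the tests above.
provisioner := test.Provisioner()
pod := test.UnschedulablePod()
ExpectApplied(ctx, env.Client, provisioner)
// ExpectProvisioned persists the pods, drives provisioner.Schedule/Launch directly
// (no controller Reconcile), binds only the pods it was handed, and returns the
// observed pod -> node bindings.
bindings := ExpectProvisioned(ctx, env.Client, cluster, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(len(bindings)).To(BeNumerically(">=", 1))
Expect(node.Labels).To(HaveKey(v1alpha5.ProvisionerNameLabelKey))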