Commit

PR Comments
ellistarn committed Oct 29, 2021
1 parent 624586d commit ad4ab5e
Showing 9 changed files with 25 additions and 28 deletions.
pkg/apis/provisioning/v1alpha5/provisioner.go (20 changes: 11 additions & 9 deletions)
@@ -152,12 +152,18 @@ func (r Requirements) WithPod(pod *v1.Pod) Requirements {
 	return r
 }
 
+// Consolidate combines In and NotIn requirements for each unique key, producing
+// an equivalent minimal representation of the requirements. This is useful as
+// requirements may be appended from a variety of sources and then consolidated.
+// Caution: If a key contains a `NotIn` operator without a corresponding
+// `In` operator, the requirement will permanently be [] after consolidation. To
+// avoid this, include the broadest `In` requirements before consolidating.
 func (r Requirements) Consolidate() (requirements Requirements) {
 	for _, key := range r.Keys() {
 		requirements = append(requirements, v1.NodeSelectorRequirement{
 			Key:      key,
 			Operator: v1.NodeSelectorOpIn,
-			Values:   r.Requirement(key).List(),
+			Values:   r.Requirement(key).UnsortedList(),
 		})
 	}
 	return requirements
@@ -168,7 +174,7 @@ func (r Requirements) CustomLabels() map[string]string {
 	for _, key := range r.Keys() {
 		if !WellKnownLabels.Has(key) {
 			if requirement := r.Requirement(key); len(requirement) > 0 {
-				labels[key] = requirement.List()[0]
+				labels[key] = requirement.UnsortedList()[0]
 			}
 		}
 	}
@@ -184,16 +190,16 @@ func (r Requirements) WellKnown() (requirements Requirements) {
 	return requirements
 }
 
-// GetLabels returns unique set of the label keys from the requirements
+// Keys returns the unique set of label keys from the requirements
 func (r Requirements) Keys() []string {
 	keys := sets.NewString()
 	for _, requirement := range r {
 		keys.Insert(requirement.Key)
 	}
-	return keys.List()
+	return keys.UnsortedList()
 }
 
-// Values for the provided key constrained by the requirements
+// Requirement returns the values for the provided key, or nil if unconstrained
 func (r Requirements) Requirement(key string) sets.String {
 	var result sets.String
 	// OpIn
@@ -212,9 +218,5 @@ func (r Requirements) Requirement(key string) sets.String {
 			result = result.Difference(sets.NewString(requirement.Values...))
 		}
 	}
-	// Unconstrained
-	if result == nil {
-		return nil
-	}
 	return result
 }
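The Requirement helpers above return a sets.String from k8s.io/apimachinery/pkg/util/sets, so the List() to UnsortedList() swaps in this commit trade deterministic ordering for a cheaper copy. A minimal standalone sketch of the behaviors involved, with made-up zone names:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	zones := sets.NewString("us-west-2b", "us-west-2a")

	// List() copies and sorts on every call; UnsortedList() copies in
	// map-iteration order, which is cheaper but nondeterministic.
	fmt.Println(zones.List())         // always [us-west-2a us-west-2b]
	fmt.Println(zones.UnsortedList()) // either order

	// The Caution on Consolidate in action: Difference on a nil set
	// returns an empty, non-nil set, so a NotIn with no prior In pins
	// the requirement to [] once consolidated.
	var result sets.String
	result = result.Difference(sets.NewString("us-west-2a"))
	fmt.Println(result, result == nil) // map[] false
}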
pkg/cloudprovider/aws/instance.go (4 changes: 2 additions & 2 deletions)
@@ -113,7 +113,7 @@ func (p *InstanceProvider) launchInstances(ctx context.Context, constraints *v1a
 	if capacityTypes := constraints.Requirements.Requirement(v1alpha1.CapacityTypeLabel); len(capacityTypes) == 0 {
 		return nil, fmt.Errorf("invariant violated, must contain at least one capacity type")
 	} else if len(capacityTypes) == 1 {
-		capacityType = capacityTypes.List()[0]
+		capacityType, _ = capacityTypes.PopAny()
 	}
 	// Get Launch Template Configs, which may differ due to GPU or Architecture requirements
 	launchTemplateConfigs, err := p.getLaunchTemplateConfigs(ctx, constraints, instanceTypes, capacityType)
@@ -264,7 +264,7 @@ func combineFleetErrors(errors []*ec2.CreateFleetError) (errs error) {
 	for _, err := range errors {
 		unique.Insert(fmt.Sprintf("%s: %s", aws.StringValue(err.ErrorCode), aws.StringValue(err.ErrorMessage)))
 	}
-	for _, errorCode := range unique.List() {
+	for errorCode := range unique {
 		errs = multierr.Append(errs, fmt.Errorf(errorCode))
 	}
 	return fmt.Errorf("with fleet error(s), %w", errs)
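Since sets.String is defined as map[string]sets.Empty, the rewritten loop ranges over the set directly instead of materializing a sorted slice first. A runnable sketch of the same deduplicate-then-combine pattern, with invented error strings:

package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
	"k8s.io/apimachinery/pkg/util/sets"
)

// combine deduplicates messages and folds them into a single error,
// mirroring combineFleetErrors above. errors.New avoids interpreting
// stray % verbs; the original uses fmt.Errorf(errorCode).
func combine(messages ...string) error {
	unique := sets.NewString(messages...)
	var errs error
	for msg := range unique {
		errs = multierr.Append(errs, errors.New(msg))
	}
	return errs
}

func main() {
	fmt.Println(combine(
		"InsufficientInstanceCapacity: no spot capacity", // duplicates collapse
		"InsufficientInstanceCapacity: no spot capacity",
	))
}

Iteration order over the set is arbitrary, which is acceptable here because the errors are ultimately joined into one value.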
pkg/cloudprovider/aws/instancetypes.go (6 changes: 1 addition & 5 deletions)
@@ -30,10 +30,6 @@ import (
 	"knative.dev/pkg/logging"
 )
 
-const (
-	allInstanceTypesKey = "all"
-)
-
 type InstanceTypeProvider struct {
 	ec2api         ec2iface.EC2API
 	subnetProvider *SubnetProvider
@@ -61,7 +57,7 @@ func (p *InstanceTypeProvider) Get(ctx context.Context, constraints *v1alpha1.Co
 	if err != nil {
 		return nil, err
 	}
-	p.cache.SetDefault(allInstanceTypesKey, instanceTypes)
+	p.cache.SetDefault(fmt.Sprint(hash), instanceTypes)
 	logging.FromContext(ctx).Debugf("Discovered %d EC2 instance types", len(instanceTypes))
 	return instanceTypes, nil
 }
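This hunk replaces the single "all" cache key with a key derived from a hash, so lookups under different constraints no longer collide on one entry; where hash is computed falls outside the hunk. A sketch of the general pattern, assuming github.com/patrickmn/go-cache for the cache (whose SetDefault the code above calls) and, as an assumption not shown in this commit, github.com/mitchellh/hashstructure/v2 for the hash:

package main

import (
	"fmt"
	"time"

	"github.com/mitchellh/hashstructure/v2"
	"github.com/patrickmn/go-cache"
)

// constraints is a hypothetical stand-in for the real Constraints type.
type constraints struct {
	Zones         []string
	Architectures []string
}

func main() {
	c := cache.New(5*time.Minute, 10*time.Minute)
	spec := constraints{Zones: []string{"us-west-2a"}, Architectures: []string{"amd64"}}

	// Key the cache on a hash of the inputs so each constraint set gets
	// its own entry instead of sharing one "all" key.
	hash, err := hashstructure.Hash(spec, hashstructure.FormatV2, nil)
	if err != nil {
		panic(err)
	}
	c.SetDefault(fmt.Sprint(hash), []string{"m5.large", "c5.xlarge"})

	if instanceTypes, ok := c.Get(fmt.Sprint(hash)); ok {
		fmt.Println(instanceTypes)
	}
}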
pkg/cloudprovider/aws/subnets.go (2 changes: 1 addition & 1 deletion)
@@ -59,7 +59,7 @@ func (s *SubnetProvider) getFilters(constraints *v1alpha1.Constraints) []*ec2.Fi
 	if zones := constraints.Requirements.Zones(); zones != nil {
 		filters = append(filters, &ec2.Filter{
 			Name:   aws.String("availability-zone"),
-			Values: aws.StringSlice(zones.List()),
+			Values: aws.StringSlice(zones.UnsortedList()),
 		})
 	}
 	// Filter by selector
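The EC2 API treats filter values as an unordered set, so the sorted copy List() produced bought nothing here. A small self-contained sketch of building the same filter, with illustrative zone names:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	zones := sets.NewString("us-west-2a", "us-west-2b")

	// Order is irrelevant to DescribeSubnets, so the unsorted copy suffices.
	filter := &ec2.Filter{
		Name:   aws.String("availability-zone"),
		Values: aws.StringSlice(zones.UnsortedList()),
	}
	fmt.Println(filter)
}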
pkg/cloudprovider/aws/suite_test.go (4 changes: 2 additions & 2 deletions)
@@ -356,8 +356,8 @@ var _ = Describe("Allocation", func() {
 		})
 		It("should default requirements", func() {
 			provisioner.SetDefaults(ctx)
-			Expect(provisioner.Spec.Requirements.Requirement(v1alpha1.CapacityTypeLabel).List()).To(ConsistOf(v1alpha1.CapacityTypeOnDemand))
-			Expect(provisioner.Spec.Requirements.Architectures().List()).To(ConsistOf(v1alpha5.ArchitectureAmd64))
+			Expect(provisioner.Spec.Requirements.Requirement(v1alpha1.CapacityTypeLabel).UnsortedList()).To(ConsistOf(v1alpha1.CapacityTypeOnDemand))
+			Expect(provisioner.Spec.Requirements.Architectures().UnsortedList()).To(ConsistOf(v1alpha5.ArchitectureAmd64))
 		})
 	})
 	Context("Validation", func() {
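These assertions keep passing after the swap because Gomega's ConsistOf matches elements regardless of order. A minimal standalone illustration:

package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestConsistOfIgnoresOrder(t *testing.T) {
	g := NewWithT(t)
	// ConsistOf succeeds for any permutation of the expected elements.
	g.Expect([]string{"b", "a"}).To(ConsistOf("a", "b"))
}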
pkg/cloudprovider/fake/cloudprovider.go (7 changes: 3 additions & 4 deletions)
@@ -36,10 +36,9 @@ func (c *CloudProvider) Create(_ context.Context, constraints *v1alpha5.Constrai
 	err := make(chan error)
 	for i := 0; i < quantity; i++ {
 		name := strings.ToLower(randomdata.SillyName())
-		// Pick first instance type option
 		instance := instanceTypes[0]
-		// Pick first zone
-		zone := instance.Zones().Intersection(constraints.Requirements.Zones()).List()[0]
+		zone, _ := instance.Zones().Intersection(constraints.Requirements.Zones()).PopAny()
+		operatingSystem, _ := instance.OperatingSystems().PopAny()
 
 		go func() {
 			err <- bind(&v1.Node{
@@ -56,7 +55,7 @@ func (c *CloudProvider) Create(_ context.Context, constraints *v1alpha5.Constrai
 				Status: v1.NodeStatus{
 					NodeInfo: v1.NodeSystemInfo{
 						Architecture:    instance.Architecture(),
-						OperatingSystem: instance.OperatingSystems().List()[0],
+						OperatingSystem: operatingSystem,
 					},
 					Allocatable: v1.ResourceList{
 						v1.ResourcePods: *instance.Pods(),
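The fake provider now draws an arbitrary element from the intersection of the zones an instance type offers and the zones the constraints allow, instead of indexing into a sorted list. A sketch with invented zones:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	offered := sets.NewString("us-west-2a", "us-west-2b", "us-west-2c")
	allowed := sets.NewString("us-west-2b", "us-west-2c")

	// PopAny removes and returns an arbitrary element; the boolean reports
	// whether the intersection was non-empty (the fake discards it).
	zone, ok := offered.Intersection(allowed).PopAny()
	fmt.Println(zone, ok)
}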
pkg/controllers/allocation/scheduling/scheduler.go (6 changes: 3 additions & 3 deletions)
@@ -106,12 +106,12 @@ func globalRequirements(instanceTypes []cloudprovider.InstanceType) (requirement
 	}
 	for _, instanceType := range instanceTypes {
 		supported[v1.LabelInstanceTypeStable].Insert(instanceType.Name())
-		supported[v1.LabelTopologyZone].Insert(instanceType.Zones().List()...)
+		supported[v1.LabelTopologyZone].Insert(instanceType.Zones().UnsortedList()...)
 		supported[v1.LabelArchStable].Insert(instanceType.Architecture())
-		supported[v1.LabelOSStable].Insert(instanceType.OperatingSystems().List()...)
+		supported[v1.LabelOSStable].Insert(instanceType.OperatingSystems().UnsortedList()...)
 	}
 	for key, values := range supported {
-		requirements = append(requirements, v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: values.List()})
+		requirements = append(requirements, v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: values.UnsortedList()})
 	}
 	return requirements
 }
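The Values of an In requirement are semantically a set, so their order has no scheduling effect; that is what makes UnsortedList() safe throughout globalRequirements. A condensed sketch of the aggregation, with illustrative labels and values:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Collect the values each instance type supports, then emit one In
	// requirement per label key.
	supported := map[string]sets.String{
		v1.LabelTopologyZone: sets.NewString("us-west-2a", "us-west-2b"),
		v1.LabelArchStable:   sets.NewString("amd64"),
	}
	var requirements []v1.NodeSelectorRequirement
	for key, values := range supported {
		requirements = append(requirements, v1.NodeSelectorRequirement{
			Key:      key,
			Operator: v1.NodeSelectorOpIn,
			Values:   values.UnsortedList(),
		})
	}
	fmt.Println(requirements)
}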
pkg/controllers/allocation/scheduling/topology.go (2 changes: 1 addition & 1 deletion)
@@ -104,7 +104,7 @@ func (t *Topology) computeHostnameTopology(topologyGroup *TopologyGroup) error {
 // selection. For example, if a cloud provider or provisioner changes the viable
 // set of nodes, topology calculations will rebalance the new set of zones.
 func (t *Topology) computeZonalTopology(ctx context.Context, requirements v1alpha5.Requirements, topologyGroup *TopologyGroup) error {
-	topologyGroup.Register(requirements.Zones().List()...)
+	topologyGroup.Register(requirements.Zones().UnsortedList()...)
 	if err := t.countMatchingPods(ctx, topologyGroup); err != nil {
 		return fmt.Errorf("getting matching pods, %w", err)
 	}
pkg/controllers/allocation/suite_test.go (2 changes: 1 addition & 1 deletion)
@@ -104,7 +104,7 @@ var _ = Describe("Allocation", func() {
 			Expect(pod.Spec.NodeName).To(Equal(nodes.Items[0].Name))
 		}
 	})
-	It("should provision nodes for pods with supported node selectors", func() {
+	FIt("should provision nodes for pods with supported node selectors", func() {
 		schedulable := []client.Object{
 			// Constrained by provisioner
 			test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1alpha5.ProvisionerNameLabelKey: provisioner.Name}}),
