fix spreadconstraints[i].MaxGroups invalidation when scaling up replicas
Signed-off-by: huone1 <[email protected]>
huone1 committed Jan 27, 2022
1 parent e0e7a60 commit e7fceb1
Showing 3 changed files with 19 additions and 14 deletions.
17 changes: 9 additions & 8 deletions pkg/scheduler/core/division_algorithm.go
@@ -48,7 +48,7 @@ func divideReplicasByResource(
} else if assignedReplicas < spec.Replicas {
// We need to enlarge the replicas in terms of the previous result (if exists).
// First scheduling is considered as a special kind of scaling up.
- newTargetClusters, err := scaleUpScheduleByReplicaDivisionPreference(clusters, spec, preference, assignedReplicas)
+ newTargetClusters, err := scaleUpScheduleByReplicaDivisionPreference(clusters, spec, preference)
if err != nil {
return nil, fmt.Errorf("failed to scaleUp: %v", err)
}
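For context on why the call site drops the assignedReplicas argument: judging from the diff, the count passed in could include replicas sitting on clusters that are not candidates in the current cycle, so the scale-up path now recomputes it internally from only the still-valid placements. A minimal, self-contained sketch of that arithmetic with hypothetical cluster names and replica counts (plain maps, not the real Karmada types):

```go
package main

import "fmt"

func main() {
	// Hypothetical scale-up from 12 to 18 replicas. member2 holds replicas from an
	// earlier cycle but is no longer a candidate (for example, excluded by
	// spreadConstraints MaxGroups), so its replicas should not count as "assigned".
	desired := int32(18)
	previous := map[string]int32{"member1": 4, "member2": 4, "member3": 4}
	candidates := map[string]bool{"member1": true, "member3": true, "member4": true}

	var staleSum, validSum int32
	for name, r := range previous {
		staleSum += r // old accounting: every previous placement counts
		if candidates[name] {
			validSum += r // new accounting: only placements on current candidates count
		}
	}

	fmt.Println("new replicas to divide, old accounting:", desired-staleSum) // 6
	fmt.Println("new replicas to divide, new accounting:", desired-validSum) // 10
}
```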
@@ -208,22 +208,23 @@ func scaleUpScheduleByReplicaDivisionPreference(
clusters []*clusterv1alpha1.Cluster,
spec *workv1alpha2.ResourceBindingSpec,
preference policyv1alpha1.ReplicaDivisionPreference,
- assignedReplicas int32,
) ([]workv1alpha2.TargetCluster, error) {
// Step 1: Find the clusters that have old replicas, so we can prefer to assign new replicas towards them.
- scheduledClusterNames := findOutScheduledCluster(spec.Clusters, clusters)
+ scheduledClusterNames, scheduledClusters := findOutScheduledCluster(spec.Clusters, clusters)

- // Step 2: Get how many replicas should be scheduled in this cycle and construct a new object if necessary
+ // Step 2: calculate the assigned Replicas in scheduledClusters
+ assignedReplicas := util.GetSumOfReplicas(scheduledClusters)
+ // Step 3: Get how many replicas should be scheduled in this cycle and construct a new object if necessary
newSpec := spec
if assignedReplicas > 0 {
newSpec = spec.DeepCopy()
newSpec.Replicas = spec.Replicas - assignedReplicas
}

- // Step 3: Calculate available replicas of all candidates
+ // Step 4: Calculate available replicas of all candidates
clusterAvailableReplicas := calAvailableReplicas(clusters, newSpec)

- // Step 4: Begin dividing.
+ // Step 5: Begin dividing.
// Only the new replicas are considered during this scheduler, the old replicas will not be moved.
// If not, the old replicas may be recreated which is not expected during scaling up.
// The parameter `scheduledClusterNames` is used to make sure that we assign new replicas to them preferentially
@@ -233,6 +234,6 @@ func scaleUpScheduleByReplicaDivisionPreference(
return result, err
}

- // Step 5: Merge the result of previous and new results.
- return util.MergeTargetClusters(spec.Clusters, result), nil
+ // Step 6: Merge the result of previous and new results.
+ return util.MergeTargetClusters(scheduledClusters, result), nil
}
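Read together, the new step sequence filters the previous placement, sums only the surviving replicas, divides the remaining delta, and merges the new result back onto the surviving placement rather than onto the full spec.Clusters. Below is a self-contained sketch of that flow using plain local types; the merge semantics (adding replica counts per cluster name) are an assumption about util.MergeTargetClusters, and divideReplicas is a stand-in for the real division step:

```go
package main

import "fmt"

// TargetCluster mirrors the {Name, Replicas} shape used throughout the diff.
type TargetCluster struct {
	Name     string
	Replicas int32
}

func sumReplicas(tcs []TargetCluster) int32 {
	var sum int32
	for _, tc := range tcs {
		sum += tc.Replicas
	}
	return sum
}

// merge sketches the assumed behavior of util.MergeTargetClusters:
// replica counts for the same cluster name are added together.
func merge(previous, fresh []TargetCluster) []TargetCluster {
	totals := map[string]int32{}
	order := []string{}
	for _, tc := range append(append([]TargetCluster{}, previous...), fresh...) {
		if _, seen := totals[tc.Name]; !seen {
			order = append(order, tc.Name)
		}
		totals[tc.Name] += tc.Replicas
	}
	out := make([]TargetCluster, 0, len(order))
	for _, name := range order {
		out = append(out, TargetCluster{Name: name, Replicas: totals[name]})
	}
	return out
}

func main() {
	// Previous placement restricted to clusters that are still candidates
	// (what findOutScheduledCluster now returns as its second value).
	scheduledClusters := []TargetCluster{{Name: "member1", Replicas: 3}}
	desired := int32(8)

	// Only the delta is divided in this cycle, mirroring newSpec.Replicas above.
	toDivide := desired - sumReplicas(scheduledClusters) // 5

	// divideReplicas is a hypothetical stand-in for the real division step.
	divideReplicas := func(n int32) []TargetCluster {
		return []TargetCluster{{Name: "member1", Replicas: 2}, {Name: "member4", Replicas: 3}}
	}

	final := merge(scheduledClusters, divideReplicas(toDivide))
	fmt.Println(final) // [{member1 5} {member4 3}]
}
```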
6 changes: 3 additions & 3 deletions pkg/scheduler/core/division_algorithm_test.go
@@ -818,9 +818,9 @@ func Test_scaleScheduling(t *testing.T) {
preference: policyv1alpha1.ReplicaDivisionPreferenceAggregated,
},
want: []workv1alpha2.TargetCluster{
- {Name: ClusterMember1, Replicas: 7},
- {Name: ClusterMember2, Replicas: 8},
- {Name: ClusterMember4, Replicas: 9},
+ {Name: ClusterMember1, Replicas: 8},
+ {Name: ClusterMember3, Replicas: 6},
+ {Name: ClusterMember4, Replicas: 10},
},
wantErr: false,
},
10 changes: 7 additions & 3 deletions pkg/scheduler/core/util.go
@@ -77,11 +77,13 @@ func calAvailableReplicas(clusters []*clusterv1alpha1.Cluster, spec *workv1alpha

// findOutScheduledCluster will return a name set of clusters
// which are a part of `feasibleClusters` and have non-zero replicas.
- func findOutScheduledCluster(tcs []workv1alpha2.TargetCluster, candidates []*clusterv1alpha1.Cluster) sets.String {
+ func findOutScheduledCluster(tcs []workv1alpha2.TargetCluster, candidates []*clusterv1alpha1.Cluster) (sets.String, []workv1alpha2.TargetCluster) {
+ validTarget := make([]workv1alpha2.TargetCluster, 0)
res := sets.NewString()
if len(tcs) == 0 {
- return res
+ return res, validTarget
}

for _, targetCluster := range tcs {
// must have non-zero replicas
if targetCluster.Replicas <= 0 {
@@ -91,11 +93,13 @@ func findOutScheduledCluster(tcs []workv1alpha2.TargetCluster, candidates []*clu
for _, cluster := range candidates {
if targetCluster.Name == cluster.Name {
res.Insert(targetCluster.Name)
+ validTarget = append(validTarget, targetCluster)
break
}
}
}
- return res
+
+ return res, validTarget
}

// resortClusterList is used to make sure scheduledClusterNames are in front of the other clusters in the list of
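The behavioral change in findOutScheduledCluster is that a previously scheduled cluster which is no longer among the candidates is dropped from both return values, so neither its name nor its replicas leak into the later assigned-replica sum and merge. A self-contained sketch of that filtering with plain local types (the real function uses sets.String and the Karmada API types):

```go
package main

import "fmt"

type TargetCluster struct {
	Name     string
	Replicas int32
}

// filterScheduled sketches the updated findOutScheduledCluster: keep only target
// clusters that still appear among the candidates and have non-zero replicas,
// returning both the surviving names and the surviving TargetCluster entries.
func filterScheduled(tcs []TargetCluster, candidates []string) ([]string, []TargetCluster) {
	isCandidate := map[string]bool{}
	for _, name := range candidates {
		isCandidate[name] = true
	}

	names := []string{}
	valid := make([]TargetCluster, 0)
	for _, tc := range tcs {
		if tc.Replicas <= 0 || !isCandidate[tc.Name] {
			continue
		}
		names = append(names, tc.Name)
		valid = append(valid, tc)
	}
	return names, valid
}

func main() {
	// Hypothetical previous placement; assume member2 is no longer a candidate.
	previous := []TargetCluster{
		{Name: "member1", Replicas: 7},
		{Name: "member2", Replicas: 8},
	}
	names, valid := filterScheduled(previous, []string{"member1", "member3", "member4"})
	fmt.Println(names, valid) // [member1] [{member1 7}]
}
```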
