From b1007913b0071245998b9a59a901c375931ef3de Mon Sep 17 00:00:00 2001
From: Aleksandra Malinowska
Date: Mon, 27 May 2024 11:09:47 +0200
Subject: [PATCH] review fixes

---
 .../provisioningrequest/besteffortatomic/provisioning_class.go | 2 +-
 .../provisioningrequest/orchestrator/orchestrator_test.go      | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/cluster-autoscaler/provisioningrequest/besteffortatomic/provisioning_class.go b/cluster-autoscaler/provisioningrequest/besteffortatomic/provisioning_class.go
index 9c901bc185c8..0c7091c92e56 100644
--- a/cluster-autoscaler/provisioningrequest/besteffortatomic/provisioning_class.go
+++ b/cluster-autoscaler/provisioningrequest/besteffortatomic/provisioning_class.go
@@ -107,7 +107,7 @@ func (o *bestEffortAtomicProvClass) Provision(
 	conditions.AddOrUpdateCondition(pr, v1beta1.Provisioned, metav1.ConditionTrue, conditions.CapacityIsFoundReason, conditions.CapacityIsFoundMsg, metav1.Now())
 	if _, updateErr := o.client.UpdateProvisioningRequest(pr.ProvisioningRequest); updateErr != nil {
 		klog.Errorf("failed to add Provisioned=true condition to ProvReq %s/%s, err: %v", pr.Namespace, pr.Name, updateErr)
-		return &status.ScaleUpStatus{Result: status.ScaleUpNotNeeded}, errors.NewAutoscalerError(errors.InternalError, "capacity available, but failed to admit workload: %s", updateErr.Error())
+		return status.UpdateScaleUpError(&status.ScaleUpStatus{}, errors.NewAutoscalerError(errors.InternalError, "capacity available, but failed to admit workload: %s", updateErr.Error()))
 	}
 	return &status.ScaleUpStatus{Result: status.ScaleUpNotNeeded}, nil
 }

diff --git a/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator_test.go b/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator_test.go
index 052c7e1827f6..7ae1e5984680 100644
--- a/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator_test.go
+++ b/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator_test.go
@@ -264,6 +264,8 @@ func TestScaleUp(t *testing.T) {
 		if !tc.err {
 			assert.NoError(t, err)
 			if tc.scaleUpResult != st.Result && len(st.PodsRemainUnschedulable) > 0 {
+				// We expected all pods to be scheduled, but some remain unschedulable.
+				// Let's add the reason groups were rejected to errors. This is useful for debugging.
 				t.Errorf("noScaleUpInfo: %#v", st.PodsRemainUnschedulable[0].RejectedNodeGroups)
 			}
 			assert.Equal(t, tc.scaleUpResult, st.Result)
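
The first hunk replaces a hand-built (ScaleUpNotNeeded, error) return with a call to
status.UpdateScaleUpError. Below is a minimal, self-contained Go sketch of the intended
behavior, assuming the helper records the error on the status and switches Result to
ScaleUpError; the ScaleUpResult and ScaleUpStatus types here are simplified stand-ins
for the cluster-autoscaler ones, not the real definitions.

package main

import (
	"errors"
	"fmt"
)

// ScaleUpResult is a simplified stand-in for status.ScaleUpResult.
type ScaleUpResult int

const (
	ScaleUpNotNeeded ScaleUpResult = iota
	ScaleUpError
)

// ScaleUpStatus is a cut-down stand-in for status.ScaleUpStatus.
type ScaleUpStatus struct {
	Result       ScaleUpResult
	ScaleUpError *error
}

// UpdateScaleUpError mirrors how the patch calls the real helper: record the
// error on the status, mark the result as a scale-up error, and return both.
func UpdateScaleUpError(s *ScaleUpStatus, err error) (*ScaleUpStatus, error) {
	s.ScaleUpError = &err
	s.Result = ScaleUpError
	return s, err
}

func main() {
	updateErr := errors.New("capacity available, but failed to admit workload")

	// Before the patch: the status claimed no scale-up was needed even though
	// an error was returned alongside it.
	oldStatus := &ScaleUpStatus{Result: ScaleUpNotNeeded}

	// After the patch: the status result agrees with the returned error.
	newStatus, err := UpdateScaleUpError(&ScaleUpStatus{}, updateErr)

	fmt.Println("old result:", oldStatus.Result, "error:", updateErr) // 0 (ScaleUpNotNeeded)
	fmt.Println("new result:", newStatus.Result, "error:", err)       // 1 (ScaleUpError)
}

The design point, as far as the diff shows: when the ProvisioningRequest update fails,
the returned status now reports a scale-up error instead of claiming no scale-up was
needed while also returning an error.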