[HPA] Add targetCPUUtilization field to collector config #1066

Merged 1 commit on Sep 23, 2022
6 changes: 4 additions & 2 deletions apis/v1alpha1/opentelemetrycollector_types.go
@@ -41,13 +41,11 @@ type OpenTelemetryCollectorSpec struct {
// MaxReplicas sets an upper bound to the autoscaling feature. If MaxReplicas is set autoscaling is enabled.
// +optional
MaxReplicas *int32 `json:"maxReplicas,omitempty"`

// Autoscaler specifies the pod autoscaling configuration to use
// for the OpenTelemetryCollector workload.
//
// +optional
Autoscaler *AutoscalerSpec `json:"autoscaler,omitempty"`

// SecurityContext will be set as the container security context.
// +optional
SecurityContext *v1.SecurityContext `json:"securityContext,omitempty"`
@@ -216,6 +214,10 @@ type OpenTelemetryCollectorList struct {
type AutoscalerSpec struct {
// +optional
Behavior *autoscalingv2.HorizontalPodAutoscalerBehavior `json:"behavior,omitempty"`
// TargetCPUUtilization sets the target average CPU used across all replicas.
// If average CPU exceeds this value, the HPA will scale up. Defaults to 90 percent.
// +optional
TargetCPUUtilization *int32 `json:"targetCPUUtilization,omitempty"`
}

func init() {
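For readers skimming the diff, here is a minimal, illustrative sketch (not part of this PR) of how the new field is set on the Go API type. The module import path and the int32Ptr helper are assumptions based on the repository layout shown above.

// sketch.go — illustrative only.
package main

import (
	"fmt"

	"github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1"
)

func int32Ptr(v int32) *int32 { return &v }

func main() {
	maxReplicas := int32(5)
	otelcol := v1alpha1.OpenTelemetryCollector{
		Spec: v1alpha1.OpenTelemetryCollectorSpec{
			// Setting MaxReplicas enables autoscaling.
			MaxReplicas: &maxReplicas,
			Autoscaler: &v1alpha1.AutoscalerSpec{
				// Scale up once average CPU across replicas exceeds 60%.
				TargetCPUUtilization: int32Ptr(60),
			},
		},
	}
	fmt.Println(*otelcol.Spec.Autoscaler.TargetCPUUtilization) // 60
}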
10 changes: 10 additions & 0 deletions apis/v1alpha1/opentelemetrycollector_webhook.go
@@ -65,6 +65,12 @@ func (r *OpenTelemetryCollector) Default() {
if r.Spec.TargetAllocator.Enabled && r.Spec.TargetAllocator.Replicas == nil {
r.Spec.TargetAllocator.Replicas = &one
}

// Set default targetCPUUtilization for autoscaler
if r.Spec.MaxReplicas != nil && r.Spec.Autoscaler.TargetCPUUtilization == nil {
defaultCPUTarget := int32(90)
r.Spec.Autoscaler.TargetCPUUtilization = &defaultCPUTarget
}
}

// +kubebuilder:webhook:verbs=create;update,path=/validate-opentelemetry-io-v1alpha1-opentelemetrycollector,mutating=false,failurePolicy=fail,groups=opentelemetry.io,resources=opentelemetrycollectors,versions=v1alpha1,name=vopentelemetrycollectorcreateupdate.kb.io,sideEffects=none,admissionReviewVersions=v1
@@ -141,6 +147,10 @@ func (r *OpenTelemetryCollector) validateCRDSpec() error {
return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, scaleUp should be one or more")
}
}
if r.Spec.Autoscaler.TargetCPUUtilization != nil && (*r.Spec.Autoscaler.TargetCPUUtilization < int32(1) || *r.Spec.Autoscaler.TargetCPUUtilization > int32(99)) {
return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, targetCPUUtilization should be greater than 0 and less than 100")
}

}

return nil
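A rough sketch of the defaulting added to the webhook, reusing the imports from the previous snippet. It assumes calling Default() directly on a partially filled spec is safe, and that an Autoscaler block is present, since the new code dereferences r.Spec.Autoscaler.

// Illustrative only: shows the effect of the Default() change above.
func defaultingSketch() {
	maxReplicas := int32(3)
	otelcol := v1alpha1.OpenTelemetryCollector{
		Spec: v1alpha1.OpenTelemetryCollectorSpec{
			MaxReplicas: &maxReplicas,
			// TargetCPUUtilization intentionally left nil.
			Autoscaler: &v1alpha1.AutoscalerSpec{},
		},
	}
	otelcol.Default()
	fmt.Println(*otelcol.Spec.Autoscaler.TargetCPUUtilization) // 90
	// Values outside the 1–99 range are rejected by the validating webhook
	// via the check added to validateCRDSpec above.
}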
2 changes: 1 addition & 1 deletion apis/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

@@ -182,6 +182,12 @@ spec:
type: integer
type: object
type: object
targetCPUUtilization:
description: TargetCPUUtilization sets the target average CPU
used across all replicas. If average CPU exceeds this value,
the HPA will scale up. Defaults to 90 percent.
format: int32
type: integer
type: object
config:
description: Config is the raw JSON to be used as the collector's
@@ -180,6 +180,12 @@ spec:
type: integer
type: object
type: object
targetCPUUtilization:
description: TargetCPUUtilization sets the target average CPU
used across all replicas. If average CPU exceeds this value,
the HPA will scale up. Defaults to 90 percent.
format: int32
type: integer
type: object
config:
description: Config is the raw JSON to be used as the collector's
9 changes: 9 additions & 0 deletions docs/api.md
@@ -1896,6 +1896,15 @@ Autoscaler specifies the pod autoscaling configuration to use for the OpenTelemetryCollector workload.
HorizontalPodAutoscalerBehavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively).<br/>
</td>
<td>false</td>
</tr><tr>
<td><b>targetCPUUtilization</b></td>
<td>integer</td>
<td>
TargetCPUUtilization sets the target average CPU used across all replicas. If average CPU exceeds this value, the HPA will scale up. Defaults to 90 percent.<br/>
<br/>
<i>Format</i>: int32<br/>
</td>
<td>false</td>
</tr></tbody>
</table>

8 changes: 3 additions & 5 deletions pkg/collector/horizontalpodautoscaler.go
@@ -28,16 +28,14 @@ import (
"github.com/open-telemetry/opentelemetry-operator/pkg/naming"
)

const defaultCPUTarget int32 = 90

func HorizontalPodAutoscaler(cfg config.Config, logger logr.Logger, otelcol v1alpha1.OpenTelemetryCollector) client.Object {
autoscalingVersion := cfg.AutoscalingVersion()

labels := Labels(otelcol, cfg.LabelsFilter())
labels["app.kubernetes.io/name"] = naming.Collector(otelcol)

annotations := Annotations(otelcol)
cpuTarget := defaultCPUTarget

var result client.Object

objectMeta := metav1.ObjectMeta{
@@ -54,7 +52,7 @@
Name: corev1.ResourceCPU,
Target: autoscalingv2beta2.MetricTarget{
Type: autoscalingv2beta2.UtilizationMetricType,
AverageUtilization: &cpuTarget,
AverageUtilization: otelcol.Spec.Autoscaler.TargetCPUUtilization,
},
},
}
@@ -87,7 +85,7 @@ func HorizontalPodAutoscaler(cfg config.Config, logger logr.Logger, otelcol v1alpha1.OpenTelemetryCollector) client.Object {
Name: corev1.ResourceCPU,
Target: autoscalingv2.MetricTarget{
Type: autoscalingv2.UtilizationMetricType,
AverageUtilization: &cpuTarget,
AverageUtilization: otelcol.Spec.Autoscaler.TargetCPUUtilization,
},
},
}
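Independent of the operator helpers, here is a small sketch of the autoscaling/v2 metric block that now carries the user-supplied value instead of the removed defaultCPUTarget constant. The types come from k8s.io/api; the package name and function are arbitrary.

package sketch

import (
	autoscalingv2 "k8s.io/api/autoscaling/v2"
	corev1 "k8s.io/api/core/v1"
)

// cpuUtilizationMetric mirrors the MetricSpec built in HorizontalPodAutoscaler()
// above: target average CPU utilization across all collector replicas.
func cpuUtilizationMetric(target *int32) autoscalingv2.MetricSpec {
	return autoscalingv2.MetricSpec{
		Type: autoscalingv2.ResourceMetricSourceType,
		Resource: &autoscalingv2.ResourceMetricSource{
			Name: corev1.ResourceCPU,
			Target: autoscalingv2.MetricTarget{
				Type: autoscalingv2.UtilizationMetricType,
				// e.g. otelcol.Spec.Autoscaler.TargetCPUUtilization
				AverageUtilization: target,
			},
		},
	}
}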
4 changes: 4 additions & 0 deletions pkg/collector/horizontalpodautoscaler_test.go
@@ -41,6 +41,7 @@ func TestHPA(t *testing.T) {

var minReplicas int32 = 3
var maxReplicas int32 = 5
var cpuUtilization int32 = 90

otelcol := v1alpha1.OpenTelemetryCollector{
ObjectMeta: metav1.ObjectMeta{
@@ -49,6 +50,9 @@
Spec: v1alpha1.OpenTelemetryCollectorSpec{
Replicas: &minReplicas,
MaxReplicas: &maxReplicas,
Autoscaler: &v1alpha1.AutoscalerSpec{
TargetCPUUtilization: &cpuUtilization,
},
},
}

2 changes: 2 additions & 0 deletions pkg/collector/reconcile/horizontalpodautoscaler.go
@@ -119,13 +119,15 @@ func setAutoscalerSpec(params Params, autoscalingVersion autodetect.AutoscalingVersion
} else {
updated.(*autoscalingv2beta2.HorizontalPodAutoscaler).Spec.MinReplicas = &one
}
updated.(*autoscalingv2beta2.HorizontalPodAutoscaler).Spec.Metrics[0].Resource.Target.AverageUtilization = params.Instance.Spec.Autoscaler.TargetCPUUtilization
} else {
updated.(*autoscalingv2.HorizontalPodAutoscaler).Spec.MaxReplicas = *params.Instance.Spec.MaxReplicas
if params.Instance.Spec.MinReplicas != nil {
updated.(*autoscalingv2.HorizontalPodAutoscaler).Spec.MinReplicas = params.Instance.Spec.MinReplicas
} else {
updated.(*autoscalingv2.HorizontalPodAutoscaler).Spec.MinReplicas = &one
}
updated.(*autoscalingv2.HorizontalPodAutoscaler).Spec.Metrics[0].Resource.Target.AverageUtilization = params.Instance.Spec.Autoscaler.TargetCPUUtilization
}
}
}
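The reconcile change above copies the configured target onto an HPA that already exists in the cluster, so edits to targetCPUUtilization propagate without recreating the object. Below is a hedged, stand-alone sketch of that update pattern, reusing the autoscalingv2 import from the previous snippet; the nil/empty guard is added for illustration, since the operator-generated object always carries one CPU metric.

// Illustrative helper, not operator code: apply a new CPU target to an
// existing autoscaling/v2 HorizontalPodAutoscaler in place.
func setCPUTarget(hpa *autoscalingv2.HorizontalPodAutoscaler, target *int32) {
	if len(hpa.Spec.Metrics) == 0 || hpa.Spec.Metrics[0].Resource == nil {
		return
	}
	hpa.Spec.Metrics[0].Resource.Target.AverageUtilization = target
}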
4 changes: 4 additions & 0 deletions pkg/collector/reconcile/horizontalpodautoscaler_test.go
@@ -109,6 +109,7 @@ func paramsWithHPA(autoscalingVersion autodetect.AutoscalingVersion) Params {

minReplicas := int32(3)
maxReplicas := int32(5)
cpuUtilization := int32(90)

mockAutoDetector := &mockAutoDetect{
HPAVersionFunc: func() (autodetect.AutoscalingVersion, error) {
@@ -147,6 +148,9 @@
Config: string(configYAML),
Replicas: &minReplicas,
MaxReplicas: &maxReplicas,
Autoscaler: &v1alpha1.AutoscalerSpec{
TargetCPUUtilization: &cpuUtilization,
},
},
},
Scheme: testScheme,
17 changes: 15 additions & 2 deletions tests/e2e/autoscale/00-assert.yaml
@@ -4,7 +4,6 @@ metadata:
name: simplest-collector
status:
readyReplicas: 1

---
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
@@ -16,4 +15,18 @@ spec:
  # This is not necessarily exact. We really just want to wait until this is no longer <unknown>
status:
currentCPUUtilizationPercentage: 20

---
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
name: simplest-set-utilization-collector
spec:
minReplicas: 1
maxReplicas: 2
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 50
35 changes: 35 additions & 0 deletions tests/e2e/autoscale/00-install.yaml
@@ -36,3 +36,38 @@ spec:
receivers: [otlp]
processors: []
exporters: [logging]
---
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
name: simplest-set-utilization
spec:
minReplicas: 1
maxReplicas: 2
autoscaler:
targetCPUUtilization: 50
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 5m
memory: 64Mi

config: |
receivers:
otlp:
protocols:
grpc:
http:
processors:

exporters:
logging:

service:
pipelines:
traces:
receivers: [otlp]
processors: []
exporters: [logging]