diff --git a/deploy/crds/jaegertracing.io_jaegers_crd.yaml b/deploy/crds/jaegertracing.io_jaegers_crd.yaml index 299962cf6..a792abc0a 100644 --- a/deploy/crds/jaegertracing.io_jaegers_crd.yaml +++ b/deploy/crds/jaegertracing.io_jaegers_crd.yaml @@ -5310,12 +5310,26 @@ spec: additionalProperties: type: string type: object + autoscale: + description: Autoscale turns on/off the autoscale feature. By default, + it's enabled if the Replicas field is not set. + type: boolean image: type: string labels: additionalProperties: type: string type: object + maxReplicas: + description: MaxReplicas sets an upper bound to the autoscaling + feature. When autoscaling is enabled and no value is provided, + a default value is used. + format: int32 + type: integer + minReplicas: + description: MinReplicas sets a lower bound to the autoscaling feature. + format: int32 + type: integer options: description: Options defines a common options parameter to the different structs diff --git a/deploy/examples/tracegen.yaml b/deploy/examples/tracegen.yaml new file mode 100644 index 000000000..370dcb287 --- /dev/null +++ b/deploy/examples/tracegen.yaml @@ -0,0 +1,41 @@ +# this is a deployment for the tracegen utility that is delivered with Jaeger +# use with care, as it generates quite some load in the current setting +# this deployment was especially designed to test the autoscaling capabilities +# and requires an instance named 'simple-prod'. +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tracegen + annotations: + "sidecar.jaegertracing.io/inject": "simple-prod" +spec: + replicas: 10 + selector: + matchLabels: + app: tracegen + template: + metadata: + labels: + app: tracegen + spec: + containers: + - name: tracegen + image: jaegertracing/jaeger-tracegen:latest + args: + - -duration=30m + - -workers=10 + - name: jaeger-agent + image: jaegertracing/jaeger-agent:1.16.0 + args: + - --reporter.grpc.host-port=dns:///simple-prod-collector-headless.default:14250 + - --reporter.type=grpc + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + ports: + - containerPort: 6831 + name: jg-compact-trft + protocol: UDP diff --git a/deploy/olm-catalog/jaeger-operator/1.16.0/jaeger-operator.v1.16.0.clusterserviceversion.yaml b/deploy/olm-catalog/jaeger-operator/1.16.0/jaeger-operator.v1.16.0.clusterserviceversion.yaml index 541ca94a5..0d088681d 100644 --- a/deploy/olm-catalog/jaeger-operator/1.16.0/jaeger-operator.v1.16.0.clusterserviceversion.yaml +++ b/deploy/olm-catalog/jaeger-operator/1.16.0/jaeger-operator.v1.16.0.clusterserviceversion.yaml @@ -211,6 +211,12 @@ spec: - kafkausers verbs: - '*' + - apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - '*' serviceAccountName: jaeger-operator deployments: - name: jaeger-operator @@ -340,6 +346,12 @@ spec: - kafkausers verbs: - '*' + - apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - '*' serviceAccountName: jaeger-operator strategy: deployment installModes: diff --git a/deploy/role.yaml b/deploy/role.yaml index 5fa5dbc29..7d4a7a284 100644 --- a/deploy/role.yaml +++ b/deploy/role.yaml @@ -92,3 +92,9 @@ rules: - kafkausers verbs: - '*' +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - '*' diff --git a/pkg/apis/jaegertracing/v1/jaeger_types.go b/pkg/apis/jaegertracing/v1/jaeger_types.go index ed31a496d..f6f708624 100644 --- a/pkg/apis/jaegertracing/v1/jaeger_types.go +++ b/pkg/apis/jaegertracing/v1/jaeger_types.go @@ -279,10 +279,22 @@ type 
JaegerAllInOneSpec struct { // JaegerCollectorSpec defines the options to be used when deploying the collector // +k8s:openapi-gen=true type JaegerCollectorSpec struct { + // Autoscale turns on/off the autoscale feature. By default, it's enabled if the Replicas field is not set. + // +optional + Autoscale *bool `json:"autoscale,omitempty"` + // Replicas represents the number of replicas to create for this service. // +optional Replicas *int32 `json:"replicas,omitempty"` + // MinReplicas sets a lower bound to the autoscaling feature. + // +optional + MinReplicas *int32 `json:"minReplicas,omitempty"` + + // MaxReplicas sets an upper bound to the autoscaling feature. When autoscaling is enabled and no value is provided, a default value is used. + // +optional + MaxReplicas *int32 `json:"maxReplicas,omitempty"` + // +optional Image string `json:"image,omitempty"` diff --git a/pkg/apis/jaegertracing/v1/zz_generated.deepcopy.go b/pkg/apis/jaegertracing/v1/zz_generated.deepcopy.go index 328c56c84..a2e3c04d1 100644 --- a/pkg/apis/jaegertracing/v1/zz_generated.deepcopy.go +++ b/pkg/apis/jaegertracing/v1/zz_generated.deepcopy.go @@ -156,11 +156,26 @@ func (in *JaegerCassandraCreateSchemaSpec) DeepCopy() *JaegerCassandraCreateSche // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *JaegerCollectorSpec) DeepCopyInto(out *JaegerCollectorSpec) { *out = *in + if in.Autoscale != nil { + in, out := &in.Autoscale, &out.Autoscale + *out = new(bool) + **out = **in + } if in.Replicas != nil { in, out := &in.Replicas, &out.Replicas *out = new(int32) **out = **in } + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = **in + } + if in.MaxReplicas != nil { + in, out := &in.MaxReplicas, &out.MaxReplicas + *out = new(int32) + **out = **in + } in.Options.DeepCopyInto(&out.Options) in.JaegerCommonSpec.DeepCopyInto(&out.JaegerCommonSpec) return diff --git a/pkg/apis/jaegertracing/v1/zz_generated.openapi.go b/pkg/apis/jaegertracing/v1/zz_generated.openapi.go index e8e482653..3281daaf4 100644 --- a/pkg/apis/jaegertracing/v1/zz_generated.openapi.go +++ b/pkg/apis/jaegertracing/v1/zz_generated.openapi.go @@ -450,6 +450,13 @@ func schema_pkg_apis_jaegertracing_v1_JaegerCollectorSpec(ref common.ReferenceCa Description: "JaegerCollectorSpec defines the options to be used when deploying the collector", Type: []string{"object"}, Properties: map[string]spec.Schema{ + "autoscale": { + SchemaProps: spec.SchemaProps{ + Description: "Autoscale turns on/off the autoscale feature. By default, it's enabled if the Replicas field is not set.", + Type: []string{"boolean"}, + Format: "", + }, + }, "replicas": { SchemaProps: spec.SchemaProps{ Description: "Replicas represents the number of replicas to create for this service.", @@ -457,6 +464,20 @@ func schema_pkg_apis_jaegertracing_v1_JaegerCollectorSpec(ref common.ReferenceCa Format: "int32", }, }, + "minReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "MinReplicas sets a lower bound to the autoscaling feature.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "maxReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "MaxReplicas sets an upper bound to the autoscaling feature. 
When autoscaling is enabled and no value is provided, a default value is used.", + Type: []string{"integer"}, + Format: "int32", + }, + }, "image": { SchemaProps: spec.SchemaProps{ Type: []string{"string"}, diff --git a/pkg/controller/jaeger/horizontalpodautoscaler.go b/pkg/controller/jaeger/horizontalpodautoscaler.go new file mode 100644 index 000000000..b427ccea7 --- /dev/null +++ b/pkg/controller/jaeger/horizontalpodautoscaler.go @@ -0,0 +1,65 @@ +package jaeger + +import ( + "context" + + log "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/global" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + "sigs.k8s.io/controller-runtime/pkg/client" + + v1 "github.com/jaegertracing/jaeger-operator/pkg/apis/jaegertracing/v1" + "github.com/jaegertracing/jaeger-operator/pkg/inventory" + "github.com/jaegertracing/jaeger-operator/pkg/tracing" +) + +func (r *ReconcileJaeger) applyHorizontalPodAutoscalers(ctx context.Context, jaeger v1.Jaeger, desired []autoscalingv2beta2.HorizontalPodAutoscaler) error { + tracer := global.TraceProvider().GetTracer(v1.ReconciliationTracer) + ctx, span := tracer.Start(ctx, "applyHorizontalPodAutoscalers") + defer span.End() + + opts := []client.ListOption{ + client.InNamespace(jaeger.Namespace), + client.MatchingLabels(map[string]string{ + "app.kubernetes.io/instance": jaeger.Name, + "app.kubernetes.io/managed-by": "jaeger-operator", + }), + } + hpaList := &autoscalingv2beta2.HorizontalPodAutoscalerList{} + if err := r.client.List(ctx, hpaList, opts...); err != nil { + return tracing.HandleError(err, span) + } + + hpaInventory := inventory.ForHorizontalPodAutoscalers(hpaList.Items, desired) + for _, d := range hpaInventory.Create { + jaeger.Logger().WithFields(log.Fields{ + "hpa": d.Name, + "namespace": d.Namespace, + }).Debug("creating hpa") + if err := r.client.Create(ctx, &d); err != nil { + return tracing.HandleError(err, span) + } + } + + for _, d := range hpaInventory.Update { + jaeger.Logger().WithFields(log.Fields{ + "hpa": d.Name, + "namespace": d.Namespace, + }).Debug("updating hpa") + if err := r.client.Update(ctx, &d); err != nil { + return tracing.HandleError(err, span) + } + } + + for _, d := range hpaInventory.Delete { + jaeger.Logger().WithFields(log.Fields{ + "hpa": d.Name, + "namespace": d.Namespace, + }).Debug("deleting hpa") + if err := r.client.Delete(ctx, &d); err != nil { + return tracing.HandleError(err, span) + } + } + + return nil +} diff --git a/pkg/controller/jaeger/horizontalpodautoscaler_test.go b/pkg/controller/jaeger/horizontalpodautoscaler_test.go new file mode 100644 index 000000000..9cb5eabfa --- /dev/null +++ b/pkg/controller/jaeger/horizontalpodautoscaler_test.go @@ -0,0 +1,201 @@ +package jaeger + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + v1 "github.com/jaegertracing/jaeger-operator/pkg/apis/jaegertracing/v1" + "github.com/jaegertracing/jaeger-operator/pkg/strategy" +) + +func TestHorizontalPodAutoscalerCreate(t *testing.T) { + // prepare + nsn := types.NamespacedName{ + Name: "TestHorizontalPodAutoscalerCreate", + Namespace: "tenant1", + } + + objs := []runtime.Object{ + v1.NewJaeger(nsn), + } + + req := reconcile.Request{ + NamespacedName: nsn, + } + + r, cl := getReconciler(objs) + r.strategyChooser = 
func(ctx context.Context, jaeger *v1.Jaeger) strategy.S { + s := strategy.New().WithHorizontalPodAutoscaler([]autoscalingv2beta2.HorizontalPodAutoscaler{{ + ObjectMeta: metav1.ObjectMeta{ + Name: nsn.Name, + Namespace: nsn.Namespace, + }, + }}) + return s + } + + // test + res, err := r.Reconcile(req) + + // verify + assert.NoError(t, err) + assert.False(t, res.Requeue, "We don't requeue for now") + + persisted := &autoscalingv2beta2.HorizontalPodAutoscaler{} + persistedName := types.NamespacedName{ + Name: nsn.Name, + Namespace: nsn.Namespace, + } + err = cl.Get(context.Background(), persistedName, persisted) + assert.Equal(t, persistedName.Name, persisted.Name) + assert.NoError(t, err) +} + +func TestHorizontalPodAutoscalerUpdate(t *testing.T) { + // prepare + nsn := types.NamespacedName{ + Name: "TestHorizontalPodAutoscalerUpdate", + Namespace: "tenant1", + } + + orig := autoscalingv2beta2.HorizontalPodAutoscaler{} + orig.Name = nsn.Name + orig.Namespace = nsn.Namespace + orig.Annotations = map[string]string{"key": "value"} + orig.Labels = map[string]string{ + "app.kubernetes.io/instance": orig.Name, + "app.kubernetes.io/managed-by": "jaeger-operator", + } + + objs := []runtime.Object{ + v1.NewJaeger(nsn), + &orig, + } + + r, cl := getReconciler(objs) + r.strategyChooser = func(ctx context.Context, jaeger *v1.Jaeger) strategy.S { + depUpdated := autoscalingv2beta2.HorizontalPodAutoscaler{} + depUpdated.Name = orig.Name + depUpdated.Namespace = orig.Namespace + depUpdated.Annotations = map[string]string{"key": "new-value"} + + s := strategy.New().WithHorizontalPodAutoscaler([]autoscalingv2beta2.HorizontalPodAutoscaler{depUpdated}) + return s + } + + // test + _, err := r.Reconcile(reconcile.Request{NamespacedName: nsn}) + assert.NoError(t, err) + + // verify + persisted := &autoscalingv2beta2.HorizontalPodAutoscaler{} + persistedName := types.NamespacedName{ + Name: orig.Name, + Namespace: orig.Namespace, + } + err = cl.Get(context.Background(), persistedName, persisted) + assert.Equal(t, "new-value", persisted.Annotations["key"]) + assert.NoError(t, err) +} + +func TestHorizontalPodAutoscalerDelete(t *testing.T) { + // prepare + nsn := types.NamespacedName{ + Name: "TestHorizontalPodAutoscalerDelete", + } + + orig := autoscalingv2beta2.HorizontalPodAutoscaler{} + orig.Name = nsn.Name + orig.Labels = map[string]string{ + "app.kubernetes.io/instance": orig.Name, + "app.kubernetes.io/managed-by": "jaeger-operator", + } + + objs := []runtime.Object{ + v1.NewJaeger(nsn), + &orig, + } + + r, cl := getReconciler(objs) + r.strategyChooser = func(ctx context.Context, jaeger *v1.Jaeger) strategy.S { + return strategy.S{} + } + + // test + _, err := r.Reconcile(reconcile.Request{NamespacedName: nsn}) + assert.NoError(t, err) + + // verify + persisted := &autoscalingv2beta2.HorizontalPodAutoscaler{} + persistedName := types.NamespacedName{ + Name: orig.Name, + Namespace: orig.Namespace, + } + err = cl.Get(context.Background(), persistedName, persisted) + assert.Empty(t, persisted.Name) + assert.Error(t, err) // not found +} + +func TestHorizontalPodAutoscalerCreateExistingNameInAnotherNamespace(t *testing.T) { + // prepare + nsn := types.NamespacedName{ + Name: "TestHorizontalPodAutoscalerCreateExistingNameInAnotherNamespace", + Namespace: "tenant1", + } + nsnExisting := types.NamespacedName{ + Name: "TestHorizontalPodAutoscalerCreateExistingNameInAnotherNamespace", + Namespace: "tenant2", + } + + objs := []runtime.Object{ + v1.NewJaeger(nsn), + v1.NewJaeger(nsnExisting), + 
&autoscalingv2beta2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: nsnExisting.Name, + Namespace: nsnExisting.Namespace, + }, + }, + } + + req := reconcile.Request{ + NamespacedName: nsn, + } + + r, cl := getReconciler(objs) + r.strategyChooser = func(ctx context.Context, jaeger *v1.Jaeger) strategy.S { + s := strategy.New().WithHorizontalPodAutoscaler([]autoscalingv2beta2.HorizontalPodAutoscaler{{ + ObjectMeta: metav1.ObjectMeta{ + Name: nsn.Name, + Namespace: nsn.Namespace, + }, + }}) + return s + } + + // test + res, err := r.Reconcile(req) + + // verify + assert.NoError(t, err) + assert.False(t, res.Requeue, "We don't requeue for now") + + persisted := &autoscalingv2beta2.HorizontalPodAutoscaler{} + err = cl.Get(context.Background(), nsn, persisted) + assert.NoError(t, err) + assert.Equal(t, nsn.Name, persisted.Name) + assert.Equal(t, nsn.Namespace, persisted.Namespace) + + persistedExisting := &autoscalingv2beta2.HorizontalPodAutoscaler{} + err = cl.Get(context.Background(), nsnExisting, persistedExisting) + assert.NoError(t, err) + assert.Equal(t, nsnExisting.Name, persistedExisting.Name) + assert.Equal(t, nsnExisting.Namespace, persistedExisting.Namespace) +} diff --git a/pkg/controller/jaeger/jaeger_controller.go b/pkg/controller/jaeger/jaeger_controller.go index dc88b8d93..76aed71c2 100644 --- a/pkg/controller/jaeger/jaeger_controller.go +++ b/pkg/controller/jaeger/jaeger_controller.go @@ -341,5 +341,11 @@ func (r *ReconcileJaeger) apply(ctx context.Context, jaeger v1.Jaeger, str strat } } + if err := r.applyHorizontalPodAutoscalers(ctx, jaeger, str.HorizontalPodAutoscalers()); err != nil { + // we don't want to fail the whole reconciliation when this fails + jaeger.Logger().WithError(tracing.HandleError(err, span)).Warn("failed to reconcile pod autoscalers") + return jaeger, nil + } + return jaeger, nil } diff --git a/pkg/deployment/collector.go b/pkg/deployment/collector.go index 835ebf49d..7f0a24ca0 100644 --- a/pkg/deployment/collector.go +++ b/pkg/deployment/collector.go @@ -6,6 +6,7 @@ import ( "strconv" appsv1 "k8s.io/api/apps/v1" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -18,6 +19,14 @@ import ( "github.com/jaegertracing/jaeger-operator/pkg/util" ) +const ( + // we need to have an upper bound, and 100 seems like a "good" max value + defaultMaxReplicas = int32(100) + + // for both memory and cpu + defaultAvgUtilization = int32(90) +) + // Collector builds pods for jaegertracing/jaeger-collector type Collector struct { jaeger *v1.Jaeger @@ -190,6 +199,88 @@ func (c *Collector) Services() []*corev1.Service { return service.NewCollectorServices(c.jaeger, c.labels()) } +// Autoscalers returns a list of HPAs based on this collector +func (c *Collector) Autoscalers() []autoscalingv2beta2.HorizontalPodAutoscaler { + // fixed number of replicas is explicitly set, do not auto scale + if c.jaeger.Spec.Collector.Replicas != nil { + return []autoscalingv2beta2.HorizontalPodAutoscaler{} + } + + // explicitly disabled, do not auto scale + if c.jaeger.Spec.Collector.Autoscale != nil && *c.jaeger.Spec.Collector.Autoscale == false { + return []autoscalingv2beta2.HorizontalPodAutoscaler{} + } + + maxReplicas := int32(-1) // unset, or invalid value + + if nil != c.jaeger.Spec.Collector.MaxReplicas { + maxReplicas = *c.jaeger.Spec.Collector.MaxReplicas + } + if maxReplicas < 0 { + maxReplicas = defaultMaxReplicas + } + + labels := c.labels() 
+ labels["app.kubernetes.io/component"] = "hpa-collector" + baseCommonSpec := v1.JaegerCommonSpec{ + Labels: labels, + } + + avgUtilization := defaultAvgUtilization + trueVar := true + commonSpec := util.Merge([]v1.JaegerCommonSpec{c.jaeger.Spec.Collector.JaegerCommonSpec, c.jaeger.Spec.JaegerCommonSpec, baseCommonSpec}) + + // scale up when either CPU or memory is above 90% + return []autoscalingv2beta2.HorizontalPodAutoscaler{{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.name(), + Namespace: c.jaeger.Namespace, + Labels: commonSpec.Labels, + Annotations: commonSpec.Annotations, + OwnerReferences: []metav1.OwnerReference{ + metav1.OwnerReference{ + APIVersion: c.jaeger.APIVersion, + Kind: c.jaeger.Kind, + Name: c.jaeger.Name, + UID: c.jaeger.UID, + Controller: &trueVar, + }, + }, + }, + Spec: autoscalingv2beta2.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscalingv2beta2.CrossVersionObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: c.name(), + }, + MinReplicas: c.jaeger.Spec.Collector.MinReplicas, + MaxReplicas: maxReplicas, + Metrics: []autoscalingv2beta2.MetricSpec{ + { + Type: autoscalingv2beta2.ResourceMetricSourceType, + Resource: &autoscalingv2beta2.ResourceMetricSource{ + Name: corev1.ResourceCPU, + Target: autoscalingv2beta2.MetricTarget{ + Type: autoscalingv2beta2.UtilizationMetricType, + AverageUtilization: &avgUtilization, + }, + }, + }, + { + Type: autoscalingv2beta2.ResourceMetricSourceType, + Resource: &autoscalingv2beta2.ResourceMetricSource{ + Name: corev1.ResourceMemory, + Target: autoscalingv2beta2.MetricTarget{ + Type: autoscalingv2beta2.UtilizationMetricType, + AverageUtilization: &avgUtilization, + }, + }, + }, + }, + }, + }} +} + func (c *Collector) labels() map[string]string { return map[string]string{ "app": "jaeger", // kept for backwards compatibility, remove by version 2.0 diff --git a/pkg/deployment/collector_test.go b/pkg/deployment/collector_test.go index 3ec29bc41..d08ee9549 100644 --- a/pkg/deployment/collector_test.go +++ b/pkg/deployment/collector_test.go @@ -21,7 +21,7 @@ func init() { func TestNegativeReplicas(t *testing.T) { size := int32(-1) - jaeger := v1.NewJaeger(types.NamespacedName{Name: "TestNegativeReplicas"}) + jaeger := v1.NewJaeger(types.NamespacedName{Name: "my-instance"}) jaeger.Spec.Collector.Replicas = &size collector := NewCollector(jaeger) @@ -30,7 +30,7 @@ func TestNegativeReplicas(t *testing.T) { } func TestDefaultSize(t *testing.T) { - jaeger := v1.NewJaeger(types.NamespacedName{Name: "TestDefaultSize"}) + jaeger := v1.NewJaeger(types.NamespacedName{Name: "my-instance"}) collector := NewCollector(jaeger) dep := collector.Get() @@ -39,7 +39,7 @@ func TestDefaultSize(t *testing.T) { func TestReplicaSize(t *testing.T) { size := int32(0) - jaeger := v1.NewJaeger(types.NamespacedName{Name: "TestReplicaSize"}) + jaeger := v1.NewJaeger(types.NamespacedName{Name: "my-instance"}) jaeger.Spec.Collector.Replicas = &size collector := NewCollector(jaeger) @@ -48,13 +48,13 @@ func TestReplicaSize(t *testing.T) { } func TestName(t *testing.T) { - collector := NewCollector(v1.NewJaeger(types.NamespacedName{Name: "TestName"})) + collector := NewCollector(v1.NewJaeger(types.NamespacedName{Name: "my-instance"})) dep := collector.Get() - assert.Equal(t, "TestName-collector", dep.ObjectMeta.Name) + assert.Equal(t, "my-instance-collector", dep.ObjectMeta.Name) } func TestCollectorServices(t *testing.T) { - collector := NewCollector(v1.NewJaeger(types.NamespacedName{Name: "TestName"})) + collector := 
NewCollector(v1.NewJaeger(types.NamespacedName{Name: "my-instance"})) svcs := collector.Services() assert.Len(t, svcs, 2) // headless and cluster IP } @@ -86,7 +86,7 @@ func TestDefaultCollectorImage(t *testing.T) { } func TestCollectorAnnotations(t *testing.T) { - jaeger := v1.NewJaeger(types.NamespacedName{Name: "TestCollectorAnnotations"}) + jaeger := v1.NewJaeger(types.NamespacedName{Name: "my-instance"}) jaeger.Spec.Annotations = map[string]string{ "name": "operator", "hello": "jaeger", @@ -107,7 +107,7 @@ func TestCollectorAnnotations(t *testing.T) { } func TestCollectorLabels(t *testing.T) { - jaeger := v1.NewJaeger(types.NamespacedName{Name: "TestCollectorLabels"}) + jaeger := v1.NewJaeger(types.NamespacedName{Name: "my-instance"}) jaeger.Spec.Labels = map[string]string{ "name": "operator", "hello": "jaeger", @@ -126,7 +126,7 @@ func TestCollectorLabels(t *testing.T) { } func TestCollectorSecrets(t *testing.T) { - jaeger := v1.NewJaeger(types.NamespacedName{Name: "TestCollectorSecrets"}) + jaeger := v1.NewJaeger(types.NamespacedName{Name: "my-instance"}) secret := "mysecret" jaeger.Spec.Storage.SecretName = secret @@ -137,7 +137,7 @@ func TestCollectorSecrets(t *testing.T) { } func TestCollectorVolumeMountsWithVolumes(t *testing.T) { - name := "TestCollectorVolumeMountsWithVolumes" + name := "my-instance" globalVolumes := []corev1.Volume{ { @@ -184,7 +184,7 @@ func TestCollectorVolumeMountsWithVolumes(t *testing.T) { } func TestCollectorMountGlobalVolumes(t *testing.T) { - name := "TestCollectorMountGlobalVolumes" + name := "my-instance" globalVolumes := []corev1.Volume{ { @@ -212,7 +212,7 @@ func TestCollectorMountGlobalVolumes(t *testing.T) { } func TestCollectorVolumeMountsWithSameName(t *testing.T) { - name := "TestCollectorVolumeMountsWithSameName" + name := "my-instance" globalVolumeMounts := []corev1.VolumeMount{ { @@ -240,7 +240,7 @@ func TestCollectorVolumeMountsWithSameName(t *testing.T) { } func TestCollectorVolumeWithSameName(t *testing.T) { - name := "TestCollectorVolumeWithSameName" + name := "my-instance" globalVolumes := []corev1.Volume{ { @@ -268,7 +268,7 @@ func TestCollectorVolumeWithSameName(t *testing.T) { } func TestCollectorResources(t *testing.T) { - jaeger := v1.NewJaeger(types.NamespacedName{Name: "TestCollectorResources"}) + jaeger := v1.NewJaeger(types.NamespacedName{Name: "my-instance"}) jaeger.Spec.Resources = corev1.ResourceRequirements{ Limits: corev1.ResourceList{ corev1.ResourceLimitsCPU: *resource.NewQuantity(1024, resource.BinarySI), @@ -302,7 +302,7 @@ func TestCollectorResources(t *testing.T) { } func TestCollectorStandardLabels(t *testing.T) { - c := NewCollector(v1.NewJaeger(types.NamespacedName{Name: "TestCollectorStandardLabels"})) + c := NewCollector(v1.NewJaeger(types.NamespacedName{Name: "my-instance"})) dep := c.Get() assert.Equal(t, "jaeger-operator", dep.Spec.Template.Labels["app.kubernetes.io/managed-by"]) assert.Equal(t, "collector", dep.Spec.Template.Labels["app.kubernetes.io/component"]) @@ -313,7 +313,7 @@ func TestCollectorStandardLabels(t *testing.T) { func TestCollectorWithDirectStorageType(t *testing.T) { jaeger := &v1.Jaeger{ ObjectMeta: metav1.ObjectMeta{ - Name: "TestCollectorWithDirectStorageType", + Name: "my-instance", }, Spec: v1.JaegerSpec{ Storage: v1.JaegerStorageSpec{ @@ -345,7 +345,7 @@ func TestCollectorWithDirectStorageType(t *testing.T) { func TestCollectorWithKafkaStorageType(t *testing.T) { jaeger := &v1.Jaeger{ ObjectMeta: metav1.ObjectMeta{ - Name: "TestCollectorWithIngesterStorageType", + Name: 
"my-instance", }, Spec: v1.JaegerSpec{ Strategy: v1.DeploymentStrategyStreaming, @@ -385,7 +385,7 @@ func TestCollectorWithKafkaStorageType(t *testing.T) { func TestCollectorWithIngesterNoOptionsStorageType(t *testing.T) { jaeger := &v1.Jaeger{ ObjectMeta: metav1.ObjectMeta{ - Name: "TestCollectorWithIngesterNoOptionsStorageType", + Name: "my-instance", }, Spec: v1.JaegerSpec{ Strategy: v1.DeploymentStrategyStreaming, @@ -417,7 +417,7 @@ func TestCollectorWithIngesterNoOptionsStorageType(t *testing.T) { } func TestCollectorOrderOfArguments(t *testing.T) { - jaeger := v1.NewJaeger(types.NamespacedName{Name: "TestCollectorOrderOfArguments"}) + jaeger := v1.NewJaeger(types.NamespacedName{Name: "my-instance"}) jaeger.Spec.Collector.Options = v1.NewOptions(map[string]interface{}{ "b-option": "b-value", "a-option": "a-value", @@ -436,3 +436,53 @@ func TestCollectorOrderOfArguments(t *testing.T) { // the following are added automatically assert.True(t, strings.HasPrefix(dep.Spec.Template.Spec.Containers[0].Args[3], "--sampling.strategies-file")) } + +func TestAutoscalersOnByDefault(t *testing.T) { + // prepare + jaeger := v1.NewJaeger(types.NamespacedName{Name: "my-instance"}) + c := NewCollector(jaeger) + + // test + a := c.Autoscalers() + + // verify + assert.Len(t, a, 1) + assert.Len(t, a[0].Spec.Metrics, 2) + + assert.Contains(t, []corev1.ResourceName{a[0].Spec.Metrics[0].Resource.Name, a[0].Spec.Metrics[1].Resource.Name}, corev1.ResourceCPU) + assert.Contains(t, []corev1.ResourceName{a[0].Spec.Metrics[0].Resource.Name, a[0].Spec.Metrics[1].Resource.Name}, corev1.ResourceMemory) + + assert.Equal(t, int32(90), *a[0].Spec.Metrics[0].Resource.Target.AverageUtilization) + assert.Equal(t, int32(90), *a[0].Spec.Metrics[1].Resource.Target.AverageUtilization) +} + +func TestAutoscalersDisabledByExplicitReplicaSize(t *testing.T) { + // prepare + tests := []int32{int32(0), int32(1)} + + for _, test := range tests { + jaeger := v1.NewJaeger(types.NamespacedName{Name: "my-instance"}) + jaeger.Spec.Collector.Replicas = &test + c := NewCollector(jaeger) + + // test + a := c.Autoscalers() + + // verify + assert.Len(t, a, 0) + } +} + +func TestAutoscalersDisabledByExplicitOption(t *testing.T) { + // prepare + disabled := false + jaeger := v1.NewJaeger(types.NamespacedName{Name: "my-instance"}) + jaeger.Spec.Collector.Autoscale = &disabled + c := NewCollector(jaeger) + + // test + a := c.Autoscalers() + + // verify + assert.Len(t, a, 0) +} diff --git a/pkg/inventory/horizontalpodautoscaler.go b/pkg/inventory/horizontalpodautoscaler.go new file mode 100644 index 000000000..c418c662d --- /dev/null +++ b/pkg/inventory/horizontalpodautoscaler.go @@ -0,0 +1,68 @@ +package inventory + +import ( + "fmt" + + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + + "github.com/jaegertracing/jaeger-operator/pkg/util" +) + +// HorizontalPodAutoscaler represents the HorizontalPodAutoscaler inventory based on the current and desired states +type HorizontalPodAutoscaler struct { + Create []autoscalingv2beta2.HorizontalPodAutoscaler + Update []autoscalingv2beta2.HorizontalPodAutoscaler + Delete []autoscalingv2beta2.HorizontalPodAutoscaler +} + +// ForHorizontalPodAutoscalers builds a new HorizontalPodAutoscaler inventory based on the existing and desired states +func ForHorizontalPodAutoscalers(existing []autoscalingv2beta2.HorizontalPodAutoscaler, desired []autoscalingv2beta2.HorizontalPodAutoscaler) HorizontalPodAutoscaler { + update := []autoscalingv2beta2.HorizontalPodAutoscaler{} + mcreate := hpaMap(desired) + 
mdelete := hpaMap(existing) + + for k, v := range mcreate { + if t, ok := mdelete[k]; ok { + tp := t.DeepCopy() + util.InitObjectMeta(tp) + + // we can't blindly DeepCopyInto, so, we select what we bring from the new to the old object + tp.Spec = v.Spec + tp.ObjectMeta.OwnerReferences = v.ObjectMeta.OwnerReferences + + for k, v := range v.ObjectMeta.Annotations { + tp.ObjectMeta.Annotations[k] = v + } + + for k, v := range v.ObjectMeta.Labels { + tp.ObjectMeta.Labels[k] = v + } + + update = append(update, *tp) + delete(mcreate, k) + delete(mdelete, k) + } + } + + return HorizontalPodAutoscaler{ + Create: hpaList(mcreate), + Update: update, + Delete: hpaList(mdelete), + } +} + +func hpaMap(hpas []autoscalingv2beta2.HorizontalPodAutoscaler) map[string]autoscalingv2beta2.HorizontalPodAutoscaler { + m := map[string]autoscalingv2beta2.HorizontalPodAutoscaler{} + for _, d := range hpas { + m[fmt.Sprintf("%s.%s", d.Namespace, d.Name)] = d + } + return m +} + +func hpaList(m map[string]autoscalingv2beta2.HorizontalPodAutoscaler) []autoscalingv2beta2.HorizontalPodAutoscaler { + l := []autoscalingv2beta2.HorizontalPodAutoscaler{} + for _, v := range m { + l = append(l, v) + } + return l +} diff --git a/pkg/inventory/horizontalpodautoscaler_test.go b/pkg/inventory/horizontalpodautoscaler_test.go new file mode 100644 index 000000000..8418ddf88 --- /dev/null +++ b/pkg/inventory/horizontalpodautoscaler_test.go @@ -0,0 +1,108 @@ +package inventory + +import ( + "testing" + + "github.com/stretchr/testify/assert" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/jaegertracing/jaeger-operator/pkg/util" +) + +func TestHorizontalPodAutoscalerInventory(t *testing.T) { + toCreate := autoscalingv2beta2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "to-create", + Namespace: "tenant1", + }, + } + toUpdate := autoscalingv2beta2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "to-update", + Namespace: "tenant1", + }, + Spec: autoscalingv2beta2.HorizontalPodAutoscalerSpec{ + MaxReplicas: 1, + }, + } + updated := autoscalingv2beta2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "to-update", + Namespace: "tenant1", + Annotations: map[string]string{"gopher": "jaeger"}, + Labels: map[string]string{"gopher": "jaeger"}, + }, + Spec: autoscalingv2beta2.HorizontalPodAutoscalerSpec{ + MaxReplicas: 2, + }, + } + toDelete := autoscalingv2beta2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "to-delete", + Namespace: "tenant1", + }, + } + + existing := []autoscalingv2beta2.HorizontalPodAutoscaler{toUpdate, toDelete} + desired := []autoscalingv2beta2.HorizontalPodAutoscaler{updated, toCreate} + + inv := ForHorizontalPodAutoscalers(existing, desired) + assert.Len(t, inv.Create, 1) + assert.Equal(t, "to-create", inv.Create[0].Name) + + assert.Len(t, inv.Update, 1) + assert.Equal(t, "to-update", inv.Update[0].Name) + assert.Equal(t, int32(2), inv.Update[0].Spec.MaxReplicas) + + assert.Len(t, inv.Delete, 1) + assert.Equal(t, "to-delete", inv.Delete[0].Name) +} + +func TestHorizontalPodAutoscalerInventoryWithSameNameInstances(t *testing.T) { + create := []autoscalingv2beta2.HorizontalPodAutoscaler{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "to-create", + Namespace: "tenant1", + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "to-create", + Namespace: "tenant2", + }, + }} + + inv := ForHorizontalPodAutoscalers([]autoscalingv2beta2.HorizontalPodAutoscaler{}, create) + 
assert.Len(t, inv.Create, 2) + assert.Contains(t, create, create[0]) + assert.Contains(t, create, create[1]) + assert.Len(t, inv.Update, 0) + assert.Len(t, inv.Delete, 0) +} + +func TestHorizontalPodAutoscalerInventoryNewWithSameNameAsExisting(t *testing.T) { + create := autoscalingv2beta2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "to-create", + Namespace: "tenant1", + }, + } + + existing := []autoscalingv2beta2.HorizontalPodAutoscaler{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "to-create", + Namespace: "tenant2", + }, + }} + + util.InitObjectMeta(&existing[0]) + inv := ForHorizontalPodAutoscalers(existing, append(existing, create)) + + assert.Len(t, inv.Create, 1) + assert.Equal(t, inv.Create[0], create) + + assert.Len(t, inv.Update, 1) + assert.Equal(t, inv.Update[0], existing[0]) + + assert.Len(t, inv.Delete, 0) +} diff --git a/pkg/strategy/all_in_one_test.go b/pkg/strategy/all_in_one_test.go index 003622958..8e15ee034 100644 --- a/pkg/strategy/all_in_one_test.go +++ b/pkg/strategy/all_in_one_test.go @@ -67,6 +67,12 @@ func TestDelegateAllInOneDependencies(t *testing.T) { assert.Equal(t, c.Dependencies(), storage.Dependencies(j)) } +func TestNoAutoscaleForAllInOne(t *testing.T) { + j := v1.NewJaeger(types.NamespacedName{Name: "my-instance"}) + c := newAllInOneStrategy(context.Background(), j) + assert.Len(t, c.HorizontalPodAutoscalers(), 0) +} + func assertDeploymentsAndServicesForAllInOne(t *testing.T, name string, s S, hasDaemonSet bool, hasOAuthProxy bool, hasConfigMap bool) { // TODO(jpkroehling): this func deserves a refactoring already diff --git a/pkg/strategy/production.go b/pkg/strategy/production.go index 6af0938d5..c2ac5301d 100644 --- a/pkg/strategy/production.go +++ b/pkg/strategy/production.go @@ -77,6 +77,9 @@ func newProductionStrategy(ctx context.Context, jaeger *v1.Jaeger) S { } } + // add autoscalers + c.horizontalPodAutoscalers = collector.Autoscalers() + if isBoolTrue(jaeger.Spec.Storage.Dependencies.Enabled) { if cronjob.SupportedStorage(jaeger.Spec.Storage.Type) { c.cronJobs = append(c.cronJobs, *cronjob.CreateSparkDependencies(jaeger)) diff --git a/pkg/strategy/production_test.go b/pkg/strategy/production_test.go index 0e7bf291c..28cffeb18 100644 --- a/pkg/strategy/production_test.go +++ b/pkg/strategy/production_test.go @@ -113,6 +113,12 @@ func TestDelegateProductionDependencies(t *testing.T) { assert.Equal(t, c.Dependencies(), storage.Dependencies(j)) } +func TestAutoscaleForProduction(t *testing.T) { + j := v1.NewJaeger(types.NamespacedName{Name: "my-instance"}) + c := newProductionStrategy(context.Background(), j) + assert.Len(t, c.HorizontalPodAutoscalers(), 1) +} + func assertDeploymentsAndServicesForProduction(t *testing.T, name string, s S, hasDaemonSet bool, hasOAuthProxy bool, hasConfigMap bool) { expectedNumObjs := 7 diff --git a/pkg/strategy/strategy.go b/pkg/strategy/strategy.go index f397aa92e..7fe03ec84 100644 --- a/pkg/strategy/strategy.go +++ b/pkg/strategy/strategy.go @@ -3,6 +3,7 @@ package strategy import ( osv1 "github.com/openshift/api/route/v1" appsv1 "k8s.io/api/apps/v1" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" batchv1 "k8s.io/api/batch/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" corev1 "k8s.io/api/core/v1" @@ -16,21 +17,22 @@ import ( // S knows what type of deployments to build based on a given spec type S struct { - typ v1.DeploymentStrategy - accounts []corev1.ServiceAccount - clusterRoleBindings []rbac.ClusterRoleBinding - configMaps []corev1.ConfigMap - cronJobs []batchv1beta1.CronJob - 
daemonSets []appsv1.DaemonSet - dependencies []batchv1.Job - deployments []appsv1.Deployment - elasticsearches []esv1.Elasticsearch - ingresses []v1beta1.Ingress - kafkas []kafkav1beta1.Kafka - kafkaUsers []kafkav1beta1.KafkaUser - routes []osv1.Route - services []corev1.Service - secrets []corev1.Secret + typ v1.DeploymentStrategy + accounts []corev1.ServiceAccount + clusterRoleBindings []rbac.ClusterRoleBinding + configMaps []corev1.ConfigMap + cronJobs []batchv1beta1.CronJob + daemonSets []appsv1.DaemonSet + dependencies []batchv1.Job + deployments []appsv1.Deployment + elasticsearches []esv1.Elasticsearch + horizontalPodAutoscalers []autoscalingv2beta2.HorizontalPodAutoscaler + ingresses []v1beta1.Ingress + kafkas []kafkav1beta1.Kafka + kafkaUsers []kafkav1beta1.KafkaUser + routes []osv1.Route + services []corev1.Service + secrets []corev1.Secret } // New constructs a new strategy from scratch @@ -97,6 +99,12 @@ func (s S) WithIngresses(i []v1beta1.Ingress) S { return s } +// WithHorizontalPodAutoscaler returns the strategy with the given list of HPAs +func (s S) WithHorizontalPodAutoscaler(i []autoscalingv2beta2.HorizontalPodAutoscaler) S { + s.horizontalPodAutoscalers = i + return s +} + // WithRoutes returns the strategy with the given list of routes func (s S) WithRoutes(r []osv1.Route) S { s.routes = r @@ -167,6 +175,11 @@ func (s S) Ingresses() []v1beta1.Ingress { return s.ingresses } +// HorizontalPodAutoscalers returns the list of HPAs objects for this strategy. +func (s S) HorizontalPodAutoscalers() []autoscalingv2beta2.HorizontalPodAutoscaler { + return s.horizontalPodAutoscalers +} + // Kafkas returns the list of Kafkas for this strategy. func (s S) Kafkas() []kafkav1beta1.Kafka { return s.kafkas diff --git a/pkg/strategy/streaming.go b/pkg/strategy/streaming.go index 006517dc9..8678282d3 100644 --- a/pkg/strategy/streaming.go +++ b/pkg/strategy/streaming.go @@ -92,6 +92,9 @@ func newStreamingStrategy(ctx context.Context, jaeger *v1.Jaeger) S { } } + // add autoscalers + manifest.horizontalPodAutoscalers = collector.Autoscalers() + if isBoolTrue(jaeger.Spec.Storage.Dependencies.Enabled) { if cronjob.SupportedStorage(jaeger.Spec.Storage.Type) { manifest.cronJobs = append(manifest.cronJobs, *cronjob.CreateSparkDependencies(jaeger)) diff --git a/pkg/strategy/streaming_test.go b/pkg/strategy/streaming_test.go index 21f4cb15c..5fec7c953 100644 --- a/pkg/strategy/streaming_test.go +++ b/pkg/strategy/streaming_test.go @@ -165,6 +165,12 @@ func TestDelegateStreamingDependencies(t *testing.T) { assert.Equal(t, c.Dependencies(), storage.Dependencies(j)) } +func TestAutoscaleForStreaming(t *testing.T) { + j := v1.NewJaeger(types.NamespacedName{Name: "my-instance"}) + c := newStreamingStrategy(context.Background(), j) + assert.Len(t, c.HorizontalPodAutoscalers(), 1) +} + func assertDeploymentsAndServicesForStreaming(t *testing.T, name string, s S, hasDaemonSet bool, hasOAuthProxy bool, hasConfigMap bool) { expectedNumObjs := 7 diff --git a/test/role.yaml b/test/role.yaml index df63ec9e1..6cd5d7785 100644 --- a/test/role.yaml +++ b/test/role.yaml @@ -91,3 +91,9 @@ rules: - kafkausers verbs: - '*' +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - '*'
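For reference, a minimal Jaeger CR sketch that exercises the new collector autoscaling fields added by this patch. Autoscaling is on by default whenever spec.collector.replicas is unset: the operator creates an autoscaling/v2beta2 HorizontalPodAutoscaler targeting the collector Deployment at 90% average CPU and memory utilization, capped at 100 replicas unless maxReplicas is provided. The values below are illustrative, and the sketch assumes a production-strategy instance named 'simple-prod' (matching the tracegen example above) with storage configured elsewhere.

apiVersion: jaegertracing.io/v1
kind: Jaeger
metadata:
  name: simple-prod
spec:
  strategy: production
  collector:
    # bounds for the generated HorizontalPodAutoscaler (illustrative values)
    minReplicas: 2
    maxReplicas: 10

To opt out, either pin an explicit replica count or disable the feature:

spec:
  collector:
    replicas: 3        # fixed size, no HPA is created
    # or:
    # autoscale: false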