From d2742306e106c423fc70fc1778d693aba16d4c84 Mon Sep 17 00:00:00 2001 From: Kristina Pathak Date: Mon, 9 Jan 2023 18:23:27 -0800 Subject: [PATCH 1/5] add DiscoveryManager interface, ScrapeConfigsHandler tests --- cmd/otel-allocator/server/bench_test.go | 263 ++++++++++++ cmd/otel-allocator/server/server.go | 11 +- cmd/otel-allocator/server/server_test.go | 525 +++++++++++++++++++++-- 3 files changed, 759 insertions(+), 40 deletions(-) create mode 100644 cmd/otel-allocator/server/bench_test.go diff --git a/cmd/otel-allocator/server/bench_test.go b/cmd/otel-allocator/server/bench_test.go new file mode 100644 index 0000000000..2219eaadb2 --- /dev/null +++ b/cmd/otel-allocator/server/bench_test.go @@ -0,0 +1,263 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "fmt" + "math/rand" + "net/http/httptest" + "testing" + "time" + + "github.com/prometheus/common/model" + promconfig "github.com/prometheus/prometheus/config" + + "github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/allocation" + "github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/target" +) + +func BenchmarkServerTargetsHandler(b *testing.B) { + rand.Seed(time.Now().UnixNano()) + var table = []struct { + numCollectors int + numJobs int + }{ + {numCollectors: 100, numJobs: 100}, + {numCollectors: 100, numJobs: 1000}, + {numCollectors: 100, numJobs: 10000}, + {numCollectors: 100, numJobs: 100000}, + {numCollectors: 1000, numJobs: 100}, + {numCollectors: 1000, numJobs: 1000}, + {numCollectors: 1000, numJobs: 10000}, + {numCollectors: 1000, numJobs: 100000}, + } + + for _, allocatorName := range allocation.GetRegisteredAllocatorNames() { + for _, v := range table { + a, _ := allocation.New(allocatorName, logger) + cols := allocation.MakeNCollectors(v.numCollectors, 0) + targets := allocation.MakeNNewTargets(v.numJobs, v.numCollectors, 0) + listenAddr := ":8080" + a.SetCollectors(cols) + a.SetTargets(targets) + s := NewServer(logger, a, nil, &listenAddr) + b.Run(fmt.Sprintf("%s_num_cols_%d_num_jobs_%d", allocatorName, v.numCollectors, v.numJobs), func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + randomJob := rand.Intn(v.numJobs) //nolint: gosec + randomCol := rand.Intn(v.numCollectors) //nolint: gosec + request := httptest.NewRequest("GET", fmt.Sprintf("/jobs/test-job-%d/targets?collector_id=collector-%d", randomJob, randomCol), nil) + w := httptest.NewRecorder() + s.server.Handler.ServeHTTP(w, request) + } + }) + } + } +} + +func BenchmarkScrapeConfigsHandler(b *testing.B) { + rand.Seed(time.Now().UnixNano()) + s := &Server{ + logger: logger, + } + + tests := []int{0, 5, 10, 50, 100, 500} + for _, n := range tests { + data := makeNScrapeConfigs(n) + b.Run(fmt.Sprintf("%d_targets", n), func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + s.compareHash = 0 + s.discoveryManager = &mockDiscoveryManager{m: data} + resp := httptest.NewRecorder() + 
s.ScrapeConfigsHandler(resp, nil) + } + }) + } +} + +func BenchmarkCollectorMapJSONHandler(b *testing.B) { + rand.Seed(time.Now().UnixNano()) + s := &Server{ + logger: logger, + } + + tests := []struct { + numCollectors int + numTargets int + }{ + { + numCollectors: 0, + numTargets: 0, + }, + { + numCollectors: 5, + numTargets: 5, + }, + { + numCollectors: 5, + numTargets: 50, + }, + { + numCollectors: 5, + numTargets: 500, + }, + { + numCollectors: 50, + numTargets: 5, + }, + { + numCollectors: 50, + numTargets: 50, + }, + { + numCollectors: 50, + numTargets: 500, + }, + { + numCollectors: 50, + numTargets: 5000, + }, + } + for _, tc := range tests { + data := makeNCollectorJSON(tc.numCollectors, tc.numTargets) + b.Run(fmt.Sprintf("%d_collectors_%d_targets", tc.numCollectors, tc.numTargets), func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + resp := httptest.NewRecorder() + s.jsonHandler(resp, data) + } + }) + } +} + +func BenchmarkTargetItemsJSONHandler(b *testing.B) { + rand.Seed(time.Now().UnixNano()) + s := &Server{ + logger: logger, + } + + tests := []struct { + numTargets int + numLabels int + }{ + { + numTargets: 0, + numLabels: 0, + }, + { + numTargets: 5, + numLabels: 5, + }, + { + numTargets: 5, + numLabels: 50, + }, + { + numTargets: 50, + numLabels: 5, + }, + { + numTargets: 50, + numLabels: 50, + }, + { + numTargets: 500, + numLabels: 50, + }, + { + numTargets: 500, + numLabels: 500, + }, + { + numTargets: 5000, + numLabels: 50, + }, + { + numTargets: 5000, + numLabels: 500, + }, + } + for _, tc := range tests { + data := makeNTargetItems(tc.numTargets, tc.numLabels) + b.Run(fmt.Sprintf("%d_targets_%d_labels", tc.numTargets, tc.numLabels), func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + resp := httptest.NewRecorder() + s.jsonHandler(resp, data) + } + }) + } +} + +var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_/") + +func randSeq(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] //nolint:gosec + } + return string(b) +} + +func makeNScrapeConfigs(n int) map[string]*promconfig.ScrapeConfig { + items := make(map[string]*promconfig.ScrapeConfig, n) + for i := 0; i < n; i++ { + items[randSeq(20)] = &promconfig.ScrapeConfig{ + JobName: randSeq(20), + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(time.Minute), + MetricsPath: randSeq(50), + SampleLimit: 5, + TargetLimit: 200, + LabelLimit: 20, + LabelNameLengthLimit: 50, + LabelValueLengthLimit: 100, + } + } + return items +} + +func makeNCollectorJSON(numCollectors, numItems int) map[string]collectorJSON { + items := make(map[string]collectorJSON, numCollectors) + for i := 0; i < numCollectors; i++ { + items[randSeq(20)] = collectorJSON{ + Link: randSeq(120), + Jobs: makeNTargetItems(numItems, 50), + } + } + return items +} + +func makeNTargetItems(numItems, numLabels int) []*target.Item { + items := make([]*target.Item, 0, numItems) + for i := 0; i < numItems; i++ { + items = append(items, target.NewItem( + randSeq(80), + randSeq(150), + makeNNewLabels(numLabels), + randSeq(30), + )) + } + return items +} + +func makeNNewLabels(n int) model.LabelSet { + labels := make(map[model.LabelName]model.LabelValue, n) + for i := 0; i < n; i++ { + labels[model.LabelName(randSeq(20))] = model.LabelValue(randSeq(20)) + } + return labels +} diff --git a/cmd/otel-allocator/server/server.go b/cmd/otel-allocator/server/server.go index 61807b0052..52f3f782b2 100644 --- 
a/cmd/otel-allocator/server/server.go +++ b/cmd/otel-allocator/server/server.go @@ -18,6 +18,7 @@ import ( "context" "encoding/json" "fmt" + promconfig "github.com/prometheus/prometheus/config" "net/http" "net/url" "time" @@ -47,17 +48,21 @@ type collectorJSON struct { Jobs []*target.Item `json:"targets"` } +type DiscoveryManager interface { + GetScrapeConfigs() map[string]*promconfig.ScrapeConfig +} + type Server struct { logger logr.Logger allocator allocation.Allocator - discoveryManager *target.Discoverer + discoveryManager DiscoveryManager server *http.Server compareHash uint64 scrapeConfigResponse []byte } -func NewServer(log logr.Logger, allocator allocation.Allocator, discoveryManager *target.Discoverer, listenAddr *string) *Server { +func NewServer(log logr.Logger, allocator allocation.Allocator, discoveryManager DiscoveryManager, listenAddr *string) *Server { s := &Server{ logger: log, allocator: allocator, @@ -166,7 +171,7 @@ func (s *Server) TargetsHandler(w http.ResponseWriter, r *http.Request) { } func (s *Server) errorHandler(w http.ResponseWriter, err error) { - w.WriteHeader(500) + w.WriteHeader(http.StatusInternalServerError) s.jsonHandler(w, err) } diff --git a/cmd/otel-allocator/server/server_test.go b/cmd/otel-allocator/server/server_test.go index 3f3c7da118..2a2dbd09b8 100644 --- a/cmd/otel-allocator/server/server_test.go +++ b/cmd/otel-allocator/server/server_test.go @@ -15,16 +15,21 @@ package server import ( - "crypto/rand" "encoding/json" "fmt" "io" - "math/big" + "net/http" "net/http/httptest" "testing" + "time" + "github.com/prometheus/common/config" "github.com/prometheus/common/model" + promconfig "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/relabel" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" logf "sigs.k8s.io/controller-runtime/pkg/log" "github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/allocation" @@ -44,6 +49,14 @@ var ( testJobTargetItemTwo = target.NewItem("test-job", "test-url2", testJobLabelSetTwo, "test-collector2") ) +type mockDiscoveryManager struct { + m map[string]*promconfig.ScrapeConfig +} + +func (m *mockDiscoveryManager) GetScrapeConfigs() map[string]*promconfig.ScrapeConfig { + return m.m +} + func TestServer_TargetsHandler(t *testing.T) { leastWeighted, _ := allocation.New("least-weighted", logger) type args struct { @@ -153,8 +166,11 @@ func TestServer_TargetsHandler(t *testing.T) { tt.args.allocator.SetTargets(tt.args.cMap) request := httptest.NewRequest("GET", fmt.Sprintf("/jobs/%s/targets?collector_id=%s", tt.args.job, tt.args.collector), nil) w := httptest.NewRecorder() + s.server.Handler.ServeHTTP(w, request) result := w.Result() + + assert.Equal(t, http.StatusOK, result.StatusCode) body := result.Body bodyBytes, err := io.ReadAll(body) assert.NoError(t, err) @@ -170,45 +186,480 @@ func TestServer_TargetsHandler(t *testing.T) { } } -func randInt(max int64) int64 { - nBig, _ := rand.Int(rand.Reader, big.NewInt(max)) - return nBig.Int64() +func TestServer_ScrapeConfigsHandler(t *testing.T) { + tests := []struct { + description string + scrapeConfigs map[string]*promconfig.ScrapeConfig + expectedCode int + expectedBody []byte + }{ + { + description: "nil scrape config", + scrapeConfigs: nil, + expectedCode: http.StatusOK, + expectedBody: []byte{}, + }, + { + description: "empty scrape config", + scrapeConfigs: map[string]*promconfig.ScrapeConfig{}, + expectedCode: 
http.StatusOK, + expectedBody: []byte{}, + }, + { + description: "single entry", + scrapeConfigs: map[string]*promconfig.ScrapeConfig{ + "serviceMonitor/testapp/testapp/0": { + JobName: "serviceMonitor/testapp/testapp/0", + HonorTimestamps: true, + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(30 * time.Second), + MetricsPath: "/metrics", + Scheme: "http", + HTTPClientConfig: config.HTTPClientConfig{ + FollowRedirects: true, + }, + RelabelConfigs: []*relabel.Config{ + { + SourceLabels: model.LabelNames{model.LabelName("job")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "__tmp_prometheus_job_name", + Replacement: "$$1", + Action: relabel.Replace, + }, + }, + }, + }, + expectedCode: http.StatusOK, + }, + { + description: "multiple entries", + scrapeConfigs: map[string]*promconfig.ScrapeConfig{ + "serviceMonitor/testapp/testapp/0": { + JobName: "serviceMonitor/testapp/testapp/0", + HonorTimestamps: true, + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(30 * time.Second), + MetricsPath: "/metrics", + Scheme: "http", + HTTPClientConfig: config.HTTPClientConfig{ + FollowRedirects: true, + }, + RelabelConfigs: []*relabel.Config{ + { + SourceLabels: model.LabelNames{model.LabelName("job")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "__tmp_prometheus_job_name", + Replacement: "$$1", + Action: relabel.Replace, + }, + { + SourceLabels: model.LabelNames{ + model.LabelName("__meta_kubernetes_service_label_app_kubernetes_io_name"), + model.LabelName("__meta_kubernetes_service_labelpresent_app_kubernetes_io_name"), + }, + Separator: ";", + Regex: relabel.MustNewRegexp("(testapp);true"), + Replacement: "$$1", + Action: relabel.Keep, + }, + { + SourceLabels: model.LabelNames{model.LabelName("__meta_kubernetes_endpoint_port_name")}, + Separator: ";", + Regex: relabel.MustNewRegexp("http"), + Replacement: "$$1", + Action: relabel.Keep, + }, + { + SourceLabels: model.LabelNames{model.LabelName("__meta_kubernetes_namespace")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "namespace", + Replacement: "$$1", + Action: relabel.Replace, + }, + { + SourceLabels: model.LabelNames{model.LabelName("__meta_kubernetes_service_name")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "service", + Replacement: "$$1", + Action: relabel.Replace, + }, + { + SourceLabels: model.LabelNames{model.LabelName("__meta_kubernetes_pod_name")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "pod", + Replacement: "$$1", + Action: relabel.Replace, + }, + { + SourceLabels: model.LabelNames{model.LabelName("__meta_kubernetes_pod_container_name")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "container", + Replacement: "$$1", + Action: relabel.Replace, + }, + }, + }, + "serviceMonitor/testapp/testapp1/0": { + JobName: "serviceMonitor/testapp/testapp1/0", + HonorTimestamps: true, + ScrapeInterval: model.Duration(5 * time.Minute), + ScrapeTimeout: model.Duration(10 * time.Second), + MetricsPath: "/v2/metrics", + Scheme: "http", + HTTPClientConfig: config.HTTPClientConfig{ + FollowRedirects: true, + }, + RelabelConfigs: []*relabel.Config{ + { + SourceLabels: model.LabelNames{model.LabelName("job")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "__tmp_prometheus_job_name", + Replacement: "$$1", + Action: relabel.Replace, + }, + { + SourceLabels: model.LabelNames{ + 
model.LabelName("__meta_kubernetes_service_label_app_kubernetes_io_name"), + model.LabelName("__meta_kubernetes_service_labelpresent_app_kubernetes_io_name"), + }, + Separator: ";", + Regex: relabel.MustNewRegexp("(testapp);true"), + Replacement: "$$1", + Action: relabel.Keep, + }, + { + SourceLabels: model.LabelNames{model.LabelName("__meta_kubernetes_endpoint_port_name")}, + Separator: ";", + Regex: relabel.MustNewRegexp("http"), + Replacement: "$$1", + Action: relabel.Keep, + }, + { + SourceLabels: model.LabelNames{model.LabelName("__meta_kubernetes_namespace")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "namespace", + Replacement: "$$1", + Action: relabel.Replace, + }, + { + SourceLabels: model.LabelNames{model.LabelName("__meta_kubernetes_service_name")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "service", + Replacement: "$$1", + Action: relabel.Replace, + }, + { + SourceLabels: model.LabelNames{model.LabelName("__meta_kubernetes_pod_name")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "pod", + Replacement: "$$1", + Action: relabel.Replace, + }, + { + SourceLabels: model.LabelNames{model.LabelName("__meta_kubernetes_pod_container_name")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "container", + Replacement: "$$1", + Action: relabel.Replace, + }, + }, + }, + "serviceMonitor/testapp/testapp2/0": { + JobName: "serviceMonitor/testapp/testapp2/0", + HonorTimestamps: true, + ScrapeInterval: model.Duration(30 * time.Minute), + ScrapeTimeout: model.Duration(2 * time.Minute), + MetricsPath: "/metrics", + Scheme: "http", + HTTPClientConfig: config.HTTPClientConfig{ + FollowRedirects: true, + }, + RelabelConfigs: []*relabel.Config{ + { + SourceLabels: model.LabelNames{model.LabelName("job")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "__tmp_prometheus_job_name", + Replacement: "$$1", + Action: relabel.Replace, + }, + { + SourceLabels: model.LabelNames{ + model.LabelName("__meta_kubernetes_service_label_app_kubernetes_io_name"), + model.LabelName("__meta_kubernetes_service_labelpresent_app_kubernetes_io_name"), + }, + Separator: ";", + Regex: relabel.MustNewRegexp("(testapp);true"), + Replacement: "$$1", + Action: relabel.Keep, + }, + { + SourceLabels: model.LabelNames{model.LabelName("__meta_kubernetes_endpoint_port_name")}, + Separator: ";", + Regex: relabel.MustNewRegexp("http"), + Replacement: "$$1", + Action: relabel.Keep, + }, + { + SourceLabels: model.LabelNames{model.LabelName("__meta_kubernetes_namespace")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "namespace", + Replacement: "$$1", + Action: relabel.Replace, + }, + { + SourceLabels: model.LabelNames{model.LabelName("__meta_kubernetes_service_name")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "service", + Replacement: "$$1", + Action: relabel.Replace, + }, + { + SourceLabels: model.LabelNames{model.LabelName("__meta_kubernetes_pod_name")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "pod", + Replacement: "$$1", + Action: relabel.Replace, + }, + { + SourceLabels: model.LabelNames{model.LabelName("__meta_kubernetes_pod_container_name")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "container", + Replacement: "$$1", + Action: relabel.Replace, + }, + }, + }, + }, + expectedCode: http.StatusOK, + }, + } + for _, tc := range tests { + t.Run(tc.description, func(t 
*testing.T) { + listenAddr := ":8080" + dm := &mockDiscoveryManager{m: tc.scrapeConfigs} + s := NewServer(logger, nil, dm, &listenAddr) + request := httptest.NewRequest("GET", "/scrape_configs", nil) + w := httptest.NewRecorder() + + s.server.Handler.ServeHTTP(w, request) + result := w.Result() + + assert.Equal(t, tc.expectedCode, result.StatusCode) + bodyBytes, err := io.ReadAll(result.Body) + require.NoError(t, err) + if tc.expectedBody != nil { + assert.Equal(t, tc.expectedBody, bodyBytes) + return + } + scrapeConfigs := map[string]*promconfig.ScrapeConfig{} + err = yaml.Unmarshal(bodyBytes, scrapeConfigs) + require.NoError(t, err) + assert.Equal(t, tc.scrapeConfigs, scrapeConfigs) + }) + } } -func BenchmarkServerTargetsHandler(b *testing.B) { - var table = []struct { - numCollectors int - numJobs int +func TestScrapeConfigsHandler_Hashing(t *testing.T) { + s := &Server{logger: logger} + // these tests are meant to be run sequentially in this order, to test + // that hashing doesn't cause us to send the wrong information. + tests := []struct { + description string + scrapeConfigs map[string]*promconfig.ScrapeConfig }{ - {numCollectors: 100, numJobs: 100}, - {numCollectors: 100, numJobs: 1000}, - {numCollectors: 100, numJobs: 10000}, - {numCollectors: 100, numJobs: 100000}, - {numCollectors: 1000, numJobs: 100}, - {numCollectors: 1000, numJobs: 1000}, - {numCollectors: 1000, numJobs: 10000}, - {numCollectors: 1000, numJobs: 100000}, + { + description: "base config", + scrapeConfigs: map[string]*promconfig.ScrapeConfig{ + "serviceMonitor/testapp/testapp/0": { + JobName: "serviceMonitor/testapp/testapp/0", + HonorTimestamps: true, + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(30 * time.Second), + MetricsPath: "/metrics", + Scheme: "http", + HTTPClientConfig: config.HTTPClientConfig{ + FollowRedirects: true, + }, + RelabelConfigs: []*relabel.Config{ + { + SourceLabels: model.LabelNames{model.LabelName("job")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "__tmp_prometheus_job_name", + Replacement: "$$1", + Action: relabel.Replace, + }, + }, + }, + }, + }, + { + description: "different bool", + scrapeConfigs: map[string]*promconfig.ScrapeConfig{ + "serviceMonitor/testapp/testapp/0": { + JobName: "serviceMonitor/testapp/testapp/0", + HonorTimestamps: false, + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(30 * time.Second), + MetricsPath: "/metrics", + Scheme: "http", + HTTPClientConfig: config.HTTPClientConfig{ + FollowRedirects: true, + }, + RelabelConfigs: []*relabel.Config{ + { + SourceLabels: model.LabelNames{model.LabelName("job")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "__tmp_prometheus_job_name", + Replacement: "$$1", + Action: relabel.Replace, + }, + }, + }, + }, + }, + { + description: "different job name", + scrapeConfigs: map[string]*promconfig.ScrapeConfig{ + "serviceMonitor/testapp/testapp/0": { + JobName: "serviceMonitor/testapp/testapp/1", + HonorTimestamps: false, + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(30 * time.Second), + MetricsPath: "/metrics", + Scheme: "http", + HTTPClientConfig: config.HTTPClientConfig{ + FollowRedirects: true, + }, + RelabelConfigs: []*relabel.Config{ + { + SourceLabels: model.LabelNames{model.LabelName("job")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "__tmp_prometheus_job_name", + Replacement: "$$1", + Action: relabel.Replace, + }, + }, + }, 
+ }, + }, + { + description: "different key", + scrapeConfigs: map[string]*promconfig.ScrapeConfig{ + "serviceMonitor/testapp/testapp/1": { + JobName: "serviceMonitor/testapp/testapp/1", + HonorTimestamps: false, + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(30 * time.Second), + MetricsPath: "/metrics", + Scheme: "http", + HTTPClientConfig: config.HTTPClientConfig{ + FollowRedirects: true, + }, + RelabelConfigs: []*relabel.Config{ + { + SourceLabels: model.LabelNames{model.LabelName("job")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "__tmp_prometheus_job_name", + Replacement: "$$1", + Action: relabel.Replace, + }, + }, + }, + }, + }, + { + description: "unset scrape interval", + scrapeConfigs: map[string]*promconfig.ScrapeConfig{ + "serviceMonitor/testapp/testapp/1": { + JobName: "serviceMonitor/testapp/testapp/1", + HonorTimestamps: false, + ScrapeTimeout: model.Duration(30 * time.Second), + MetricsPath: "/metrics", + Scheme: "http", + HTTPClientConfig: config.HTTPClientConfig{ + FollowRedirects: true, + }, + RelabelConfigs: []*relabel.Config{ + { + SourceLabels: model.LabelNames{model.LabelName("job")}, + Separator: ";", + Regex: relabel.MustNewRegexp("(.*)"), + TargetLabel: "__tmp_prometheus_job_name", + Replacement: "$$1", + Action: relabel.Replace, + }, + }, + }, + }, + }, + //{ + // + // TODO: fix handler logic so this test passes. + // This test currently fails due to the regexp struct not having any + // exported fields for the hashing algorithm to hash on, causing the + // hashes to be the same even though the data is different. + // + // description: "different regex", + // scrapeConfigs: map[string]*promconfig.ScrapeConfig{ + // "serviceMonitor/testapp/testapp/1": { + // JobName: "serviceMonitor/testapp/testapp/1", + // HonorTimestamps: false, + // ScrapeTimeout: model.Duration(30 * time.Second), + // MetricsPath: "/metrics", + // HTTPClientConfig: config.HTTPClientConfig{ + // FollowRedirects: true, + // }, + // RelabelConfigs: []*relabel.Config{ + // { + // SourceLabels: model.LabelNames{model.LabelName("job")}, + // Separator: ";", + // Regex: relabel.MustNewRegexp("something else"), + // TargetLabel: "__tmp_prometheus_job_name", + // Replacement: "$$1", + // Action: relabel.Replace, + // }, + // }, + // }, + // }, + //}, } + for _, tc := range tests { + t.Run(tc.description, func(t *testing.T) { + dm := &mockDiscoveryManager{m: tc.scrapeConfigs} + s.discoveryManager = dm + request := httptest.NewRequest("GET", "/scrape_configs", nil) + w := httptest.NewRecorder() - for _, allocatorName := range allocation.GetRegisteredAllocatorNames() { - for _, v := range table { - a, _ := allocation.New(allocatorName, logger) - cols := allocation.MakeNCollectors(v.numCollectors, 0) - targets := allocation.MakeNNewTargets(v.numJobs, v.numCollectors, 0) - listenAddr := ":8080" - a.SetCollectors(cols) - a.SetTargets(targets) - s := NewServer(logger, a, nil, &listenAddr) - b.Run(fmt.Sprintf("%s_num_cols_%d_num_jobs_%d", allocatorName, v.numCollectors, v.numJobs), func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - randomJob := randInt(int64(v.numJobs)) - randomCol := randInt(int64(v.numCollectors)) - request := httptest.NewRequest("GET", fmt.Sprintf("/jobs/test-job-%d/targets?collector_id=collector-%d", randomJob, randomCol), nil) - w := httptest.NewRecorder() - s.server.Handler.ServeHTTP(w, request) - } - }) - } + s.ScrapeConfigsHandler(w, request) + result := w.Result() + + assert.Equal(t, http.StatusOK, 
result.StatusCode) + bodyBytes, err := io.ReadAll(result.Body) + require.NoError(t, err) + scrapeConfigResult := map[string]*promconfig.ScrapeConfig{} + err = yaml.Unmarshal(bodyBytes, scrapeConfigResult) + require.NoError(t, err) + assert.Equal(t, tc.scrapeConfigs, scrapeConfigResult) + }) } } From d5976d1f5da042e3acb6a876d30fad399a7197ba Mon Sep 17 00:00:00 2001 From: Kristina Pathak Date: Mon, 23 Jan 2023 18:03:53 -0800 Subject: [PATCH 2/5] add job handler tests --- cmd/otel-allocator/server/mocks_test.go | 53 ++++++++++++++++ cmd/otel-allocator/server/server.go | 2 +- cmd/otel-allocator/server/server_test.go | 80 +++++++++++++++++++++--- 3 files changed, 126 insertions(+), 9 deletions(-) create mode 100644 cmd/otel-allocator/server/mocks_test.go diff --git a/cmd/otel-allocator/server/mocks_test.go b/cmd/otel-allocator/server/mocks_test.go new file mode 100644 index 0000000000..d0a3e178b3 --- /dev/null +++ b/cmd/otel-allocator/server/mocks_test.go @@ -0,0 +1,53 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + promconfig "github.com/prometheus/prometheus/config" + + "github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/allocation" + "github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/target" +) + +var ( + _ DiscoveryManager = &mockDiscoveryManager{} + _ allocation.Allocator = &mockAllocator{} +) + +type mockDiscoveryManager struct { + m map[string]*promconfig.ScrapeConfig +} + +func (m *mockDiscoveryManager) GetScrapeConfigs() map[string]*promconfig.ScrapeConfig { + return m.m +} + +// mockAllocator implements the Allocator interface, but all funcs other than +// TargetItems() are a no-op. 
+type mockAllocator struct { + targetItems map[string]*target.Item +} + +func (m *mockAllocator) SetCollectors(_ map[string]*allocation.Collector) {} +func (m *mockAllocator) SetTargets(_ map[string]*target.Item) {} +func (m *mockAllocator) Collectors() map[string]*allocation.Collector { return nil } +func (m *mockAllocator) GetTargetsForCollectorAndJob(_ string, _ string) []*target.Item { return nil } +func (m *mockAllocator) SetFilter(_ allocation.Filter) {} + +func (m *mockAllocator) TargetItems() map[string]*target.Item { + return m.targetItems +} + + diff --git a/cmd/otel-allocator/server/server.go b/cmd/otel-allocator/server/server.go index 52f3f782b2..c4c67847f0 100644 --- a/cmd/otel-allocator/server/server.go +++ b/cmd/otel-allocator/server/server.go @@ -18,7 +18,6 @@ import ( "context" "encoding/json" "fmt" - promconfig "github.com/prometheus/prometheus/config" "net/http" "net/url" "time" @@ -30,6 +29,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/promhttp" + promconfig "github.com/prometheus/prometheus/config" "gopkg.in/yaml.v2" "github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/allocation" diff --git a/cmd/otel-allocator/server/server_test.go b/cmd/otel-allocator/server/server_test.go index 2a2dbd09b8..a42d482549 100644 --- a/cmd/otel-allocator/server/server_test.go +++ b/cmd/otel-allocator/server/server_test.go @@ -20,6 +20,7 @@ import ( "io" "net/http" "net/http/httptest" + "net/url" "testing" "time" @@ -49,14 +50,6 @@ var ( testJobTargetItemTwo = target.NewItem("test-job", "test-url2", testJobLabelSetTwo, "test-collector2") ) -type mockDiscoveryManager struct { - m map[string]*promconfig.ScrapeConfig -} - -func (m *mockDiscoveryManager) GetScrapeConfigs() map[string]*promconfig.ScrapeConfig { - return m.m -} - func TestServer_TargetsHandler(t *testing.T) { leastWeighted, _ := allocation.New("least-weighted", logger) type args struct { @@ -663,3 +656,74 @@ func TestScrapeConfigsHandler_Hashing(t *testing.T) { }) } } + +func TestServer_JobHandler(t *testing.T) { + tests := []struct { + description string + targetItems map[string]*target.Item + expectedCode int + expectedJobs map[string]target.LinkJSON + }{ + { + description: "nil jobs", + targetItems: nil, + expectedCode: http.StatusOK, + expectedJobs: make(map[string]target.LinkJSON), + }, + { + description: "empty jobs", + targetItems: map[string]*target.Item{}, + expectedCode: http.StatusOK, + expectedJobs: make(map[string]target.LinkJSON), + }, + { + description: "one job", + targetItems: map[string]*target.Item{ + "targetitem": target.NewItem("job1", "", model.LabelSet{}, ""), + }, + expectedCode: http.StatusOK, + expectedJobs: map[string]target.LinkJSON{ + "job1": newLink("job1"), + }, + }, + { + description: "multiple jobs", + targetItems: map[string]*target.Item{ + "a": target.NewItem("job1", "", model.LabelSet{}, ""), + "b": target.NewItem("job2", "", model.LabelSet{}, ""), + "c": target.NewItem("job3", "", model.LabelSet{}, ""), + "d": target.NewItem("job3", "", model.LabelSet{}, ""), + "e": target.NewItem("job3", "", model.LabelSet{}, "")}, + expectedCode: http.StatusOK, + expectedJobs: map[string]target.LinkJSON{ + "job1": newLink("job1"), + "job2": newLink("job2"), + "job3": newLink("job3"), + }, + }, + } + for _, tc := range tests { + t.Run(tc.description, func(t *testing.T) { + listenAddr := ":8080" + a := 
&mockAllocator{targetItems: tc.targetItems} + s := NewServer(logger, a, nil, &listenAddr) + request := httptest.NewRequest("GET", "/jobs", nil) + w := httptest.NewRecorder() + + s.server.Handler.ServeHTTP(w, request) + result := w.Result() + + assert.Equal(t, tc.expectedCode, result.StatusCode) + bodyBytes, err := io.ReadAll(result.Body) + require.NoError(t, err) + jobs := map[string]target.LinkJSON{} + err = json.Unmarshal(bodyBytes, &jobs) + require.NoError(t, err) + assert.Equal(t, tc.expectedJobs, jobs) + }) + } +} + +func newLink(jobName string) target.LinkJSON { + return target.LinkJSON{fmt.Sprintf("/jobs/%s/targets", url.QueryEscape(jobName))} +} From 4c0d3e3fab58f400a2b2a0eeb5aa358b7eea7eac Mon Sep 17 00:00:00 2001 From: Kristina Pathak Date: Mon, 23 Jan 2023 18:14:25 -0800 Subject: [PATCH 3/5] add changelog entry --- .chloggen/improve-server-tests.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100755 .chloggen/improve-server-tests.yaml diff --git a/.chloggen/improve-server-tests.yaml b/.chloggen/improve-server-tests.yaml new file mode 100755 index 0000000000..582d0eaf42 --- /dev/null +++ b/.chloggen/improve-server-tests.yaml @@ -0,0 +1,16 @@ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. operator, target allocator, github action) +component: target allocator + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Added test coverage for server handling. + +# One or more tracking issues related to the change +issues: [1392] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. 
+subtext: From f959a0a85f11f49ee28b8cffdc96c09eb5469171 Mon Sep 17 00:00:00 2001 From: Kristina Pathak Date: Mon, 23 Jan 2023 20:51:27 -0800 Subject: [PATCH 4/5] make LinkJSON with keyed fields --- cmd/otel-allocator/server/server_test.go | 2 +- cmd/otel-allocator/target/target.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/otel-allocator/server/server_test.go b/cmd/otel-allocator/server/server_test.go index a42d482549..35ce2467e0 100644 --- a/cmd/otel-allocator/server/server_test.go +++ b/cmd/otel-allocator/server/server_test.go @@ -725,5 +725,5 @@ func TestServer_JobHandler(t *testing.T) { } func newLink(jobName string) target.LinkJSON { - return target.LinkJSON{fmt.Sprintf("/jobs/%s/targets", url.QueryEscape(jobName))} + return target.LinkJSON{Link: fmt.Sprintf("/jobs/%s/targets", url.QueryEscape(jobName))} } diff --git a/cmd/otel-allocator/target/target.go b/cmd/otel-allocator/target/target.go index 22106c0829..51940f47d1 100644 --- a/cmd/otel-allocator/target/target.go +++ b/cmd/otel-allocator/target/target.go @@ -46,7 +46,7 @@ func (t *Item) Hash() string { func NewItem(jobName string, targetURL string, label model.LabelSet, collectorName string) *Item { return &Item{ JobName: jobName, - Link: LinkJSON{fmt.Sprintf("/jobs/%s/targets", url.QueryEscape(jobName))}, + Link: LinkJSON{Link: fmt.Sprintf("/jobs/%s/targets", url.QueryEscape(jobName))}, hash: jobName + targetURL + label.Fingerprint().String(), TargetURL: []string{targetURL}, Labels: label, From b1de37e52aaded4df4ade3bd8dc9064145d7db8a Mon Sep 17 00:00:00 2001 From: Kristina Pathak Date: Tue, 24 Jan 2023 09:49:17 -0800 Subject: [PATCH 5/5] fix whitespace --- cmd/otel-allocator/server/mocks_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/cmd/otel-allocator/server/mocks_test.go b/cmd/otel-allocator/server/mocks_test.go index d0a3e178b3..3b39888806 100644 --- a/cmd/otel-allocator/server/mocks_test.go +++ b/cmd/otel-allocator/server/mocks_test.go @@ -49,5 +49,3 @@ func (m *mockAllocator) SetFilter(_ allocation.Filter) func (m *mockAllocator) TargetItems() map[string]*target.Item { return m.targetItems } - -
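
Note on the compareHash reset in BenchmarkScrapeConfigsHandler: the Server struct carries compareHash and scrapeConfigResponse fields, which implies ScrapeConfigsHandler re-marshals the scrape configs only when their hash changes, so zeroing the hash each iteration benchmarks the full, uncached path. Below is a minimal sketch of that hash-gated caching pattern, under stated assumptions; it is not the handler from server.go, and hashScrapeConfigs is a hypothetical stand-in for whatever hashing the real handler performs.

// Sketch only -- not the actual ScrapeConfigsHandler from server.go.
// hashScrapeConfigs is a hypothetical helper standing in for the handler's
// real hashing step; everything else it touches appears in this diff.
func (s *Server) scrapeConfigsHandlerSketch(w http.ResponseWriter, _ *http.Request) {
	configs := s.discoveryManager.GetScrapeConfigs()

	hash := hashScrapeConfigs(configs)
	if hash != s.compareHash {
		// Re-marshal only when the configs changed since the last request.
		body, err := yaml.Marshal(configs)
		if err != nil {
			s.errorHandler(w, err)
			return
		}
		s.compareHash = hash
		s.scrapeConfigResponse = body
	}

	if _, err := w.Write(s.scrapeConfigResponse); err != nil {
		s.errorHandler(w, err)
	}
}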
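
Note on the commented-out "different regex" case in TestScrapeConfigsHandler_Hashing: the TODO explains that the hashes collide because the regexp type exposes no exported fields for the hashing algorithm to walk. The standalone program below reproduces that collision under the assumption that the comparison uses a reflection-based hasher in the spirit of github.com/mitchellh/hashstructure/v2 (the actual hashing code is outside this diff); a wrapper whose only data lives in unexported fields hashes identically for two different patterns.

package main

import (
	"fmt"
	"regexp"

	"github.com/mitchellh/hashstructure/v2"
)

// wrapper stands in for relabel.Regexp: its data lives only in the unexported
// fields of the embedded *regexp.Regexp, so a reflection-based hasher that
// skips unexported fields finds nothing to distinguish two values.
type wrapper struct {
	*regexp.Regexp
}

func main() {
	a, _ := hashstructure.Hash(wrapper{Regexp: regexp.MustCompile("(.*)")}, hashstructure.FormatV2, nil)
	b, _ := hashstructure.Hash(wrapper{Regexp: regexp.MustCompile("something else")}, hashstructure.FormatV2, nil)

	// Prints "true": the two values differ only in their regex, yet produce
	// the same hash, which is why the "different regex" test case is skipped.
	fmt.Println(a == b)
}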