diff --git a/.gitignore b/.gitignore index bbe7333e3694..612860c9e169 100644 --- a/.gitignore +++ b/.gitignore @@ -12,11 +12,13 @@ cover.html .mkdocs-virtual-env/ vendor/ examples/hotrod/hotrod -cmd/standalone/standalone-linux +cmd/all-in-one/all-in-one-* cmd/agent/agent cmd/agent/agent-linux cmd/collector/collector cmd/collector/collector-linux +cmd/ingester/ingester +cmd/ingester/ingester-linux cmd/query/query cmd/query/query-linux crossdock/crossdock-linux diff --git a/.travis.yml b/.travis.yml index 9cb9b905440a..ef156b86432e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -59,7 +59,6 @@ script: - if [ "$ES_INTEGRATION_TEST" == true ]; then bash ./scripts/travis/es-integration-test.sh ; else echo 'skipping elastic search integration test'; fi - if [ "$KAFKA_INTEGRATION_TEST" == true ]; then bash ./scripts/travis/kafka-integration-test.sh ; else echo 'skipping kafka integration test'; fi - if [ "$HOTROD" == true ]; then bash ./scripts/travis/hotrod-integration-test.sh ; else echo 'skipping hotrod example'; fi - - if [[ "$DEPLOY" == true && "$BRANCH" == "master" ]]; then make build-all-platforms ; else echo 'skipping linux'; fi after_success: - if [ "$COVERAGE" == true ]; then mv cover.out coverage.txt ; else echo 'skipping coverage'; fi @@ -69,7 +68,7 @@ after_failure: - if [ "$CROSSDOCK" == true ]; then make crossdock-logs ; else echo 'skipping crossdock'; fi before_deploy: - - bash ./scripts/travis/package-deploy.sh + - make build-all-platforms && bash ./scripts/travis/package-deploy.sh deploy: provider: releases diff --git a/CHANGELOG.md b/CHANGELOG.md index ce8ee9d08ea5..c2755cf0f910 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,15 +1,43 @@ Changes by Version ================== -TBD (pending) +1.8.0 (unreleased) +------------------ + +#### Backend Changes + +##### Breaking Changes + +- Consolidate query metrics and include result tag ([#1075](https://github.com/jaegertracing/jaeger/pull/1075), [@objectiser](https://github.com/objectiser)) +- Make the metrics produced by jaeger query scoped to the query component, and generated for all span readers (not just ES) ([#1074](https://github.com/jaegertracing/jaeger/pull/1074), [@objectiser](https://github.com/objectiser)) + + +1.7.0 (2018-09-19) ------------------ #### UI Changes +- Compare two traces ([#228](https://github.com/jaegertracing/jaeger-ui/pull/228), [@tiffon](https://github.com/tiffon)) +- Make tags clickable ([#223](https://github.com/jaegertracing/jaeger-ui/pull/223), [@divdavem](https://github.com/divdavem)) +- Directed graph as React component ([#224](https://github.com/jaegertracing/jaeger-ui/pull/224), [@tiffon](https://github.com/tiffon)) +- Timeline Expand and Collapse Features ([#221](https://github.com/jaegertracing/jaeger-ui/issues/221), [@davit-y](https://github.com/davit-y)) +- Integrate Google Analytics into Search Page ([#220](https://github.com/jaegertracing/jaeger-ui/issues/220), [@davit-y](https://github.com/davit-y)) + +#### Backend Changes + +##### Breaking changes + +- `jaeger-standalone` binary has been renamed to `jaeger-all-in-one`.
This change also includes a package rename from `standalone` to `all-in-one` ([#1062](https://github.com/jaegertracing/jaeger/pull/1062), [@pavolloffay](https://github.com/pavolloffay)) + ##### New Features -- Timeline Expand and Collapse Features ([#221](https://github.com/jaegertracing/jaeger-ui/issues/221)) -- Integrate Google Analytics into Search Page ([#220](https://github.com/jaegertracing/jaeger-ui/issues/220)) +- (Experimental) Allow storing tags as object fields in Elasticsearch for better Kibana support ([#1018](https://github.com/jaegertracing/jaeger/pull/1018), [@pavolloffay](https://github.com/pavolloffay)) +- Enable tracing of Cassandra queries ([#1038](https://github.com/jaegertracing/jaeger/pull/1038), [@yurishkuro](https://github.com/yurishkuro)) +- Make Elasticsearch index configurable ([#1009](https://github.com/jaegertracing/jaeger/pull/1009), [@pavolloffay](https://github.com/pavoloffay)) +- Add flags to allow changing ports for HotROD services ([#951](https://github.com/jaegertracing/jaeger/pull/951), [@cboornaz17](https://github.com/cboornaz17)) +- (Experimental) Kafka ingester ([#952](https://github.com/jaegertracing/jaeger/pull/952), [#942](https://github.com/jaegertracing/jaeger/pull/942), [#944](https://github.com/jaegertracing/jaeger/pull/944), [#940](https://github.com/jaegertracing/jaeger/pull/940), [@davit-y](https://github.com/davit-y) and [@vprithvi](https://github.com/vprithvi)) +- Use tags in agent metrics ([#950](https://github.com/jaegertracing/jaeger/pull/950), [@eundoosong](https://github.com/eundoosong)) +- Add support for Cassandra reconnect interval ([#934](https://github.com/jaegertracing/jaeger/pull/934), [@nyanshak](https://github.com/nyanshak)) 1.6.0 (2018-07-10) ------------------ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4c05a11f8381..4d2bb48f33a5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -43,13 +43,13 @@ make test The `jaeger-ui` submodule contains the source code for the UI assets (requires Node.js 6+). The assets must be compiled first with `make build_ui`, which runs Node.js build and then packages the assets into a Go file that is `.gitignore`-ed. The packaged assets can be enabled by providing a build tag `ui`, e.g.: ``` -$ go run -tags ui ./cmd/standalone/main.go +$ go run -tags ui ./cmd/all-in-one/main.go ``` Alternatively, the path to the built UI assets can be provided via `--query.static-files` flag: ``` -$ go run ./cmd/standalone/main.go --query.static-files jaeger-ui/build +$ go run ./cmd/all-in-one/main.go --query.static-files jaeger-ui/build ``` ## Project Structure diff --git a/Makefile b/Makefile index 668e668477ba..fc1ccec36d5b 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,8 @@ GOTEST=go test -v $(RACE) GOLINT=golint GOVET=go vet GOFMT=gofmt -GOSEC=gosec -quiet -exclude=G104 +GOSEC=gosec -quiet -exclude=G104,G107 +GOSIMPLE=gosimple FMT_LOG=fmt.log LINT_LOG=lint.log IMPORT_LOG=import.log @@ -93,7 +94,7 @@ test: go-gen .PHONY: integration-test integration-test: go-gen - $(GOTEST) -tags=integration ./cmd/standalone/... + $(GOTEST) -tags=integration ./cmd/all-in-one/...
.PHONY: storage-integration-test storage-integration-test: go-gen @@ -132,6 +133,7 @@ lint-gosec: .PHONY: lint lint: lint-gosec $(GOVET) $(TOP_PKGS) + $(GOSIMPLE) $(TOP_PKGS) @cat /dev/null > $(LINT_LOG) $(GOLINT) $(TOP_PKGS) | \ grep -v \ @@ -182,7 +184,7 @@ build-all-in-one-linux: build_ui .PHONY: build-all-in-one build-all-in-one: - CGO_ENABLED=0 installsuffix=cgo go build -tags ui -o ./cmd/standalone/standalone-$(GOOS) $(BUILD_INFO) ./cmd/standalone/main.go + CGO_ENABLED=0 installsuffix=cgo go build -tags ui -o ./cmd/all-in-one/all-in-one-$(GOOS) $(BUILD_INFO) ./cmd/all-in-one/main.go .PHONY: build-agent build-agent: @@ -196,6 +198,10 @@ build-query: build-collector: CGO_ENABLED=0 installsuffix=cgo go build -o ./cmd/collector/collector-$(GOOS) $(BUILD_INFO) ./cmd/collector/main.go +.PHONY: build-ingester +build-ingester: + CGO_ENABLED=0 installsuffix=cgo go build -o ./cmd/ingester/ingester-$(GOOS) $(BUILD_INFO) ./cmd/ingester/main.go + .PHONY: docker-no-ui docker-no-ui: build-binaries-linux build-crossdock-linux make docker-images-only @@ -216,7 +222,7 @@ build-binaries-darwin: GOOS=darwin $(MAKE) build-platform-binaries .PHONY: build-platform-binaries -build-platform-binaries: build-agent build-collector build-query build-all-in-one build-examples +build-platform-binaries: build-agent build-collector build-query build-ingester build-all-in-one build-examples .PHONY: build-all-platforms build-all-platforms: build-binaries-linux build-binaries-windows build-binaries-darwin @@ -227,7 +233,7 @@ docker-images-only: @echo "Finished building jaeger-cassandra-schema ==============" docker build -t $(DOCKER_NAMESPACE)/jaeger-es-index-cleaner:${DOCKER_TAG} plugin/storage/es @echo "Finished building jaeger-es-indices-clean ==============" - for component in agent collector query ; do \ + for component in agent collector query ingester ; do \ docker build -t $(DOCKER_NAMESPACE)/jaeger-$$component:${DOCKER_TAG} cmd/$$component ; \ echo "Finished building $$component ==============" ; \ done @@ -242,7 +248,7 @@ docker-push: if [ $$CONFIRM != "y" ] && [ $$CONFIRM != "Y" ]; then \ echo "Exiting." ; exit 1 ; \ fi - for component in agent cassandra-schema es-index-cleaner collector query example-hotrod; do \ + for component in agent cassandra-schema es-index-cleaner collector query ingester example-hotrod; do \ docker push $(DOCKER_NAMESPACE)/jaeger-$$component ; \ done @@ -268,11 +274,12 @@ build-crossdock-fresh: build-crossdock-linux .PHONY: install-tools install-tools: - go get github.com/wadey/gocovmerge - go get golang.org/x/tools/cmd/cover - go get github.com/golang/lint/golint - go get github.com/sectioneight/md-to-godoc - go get github.com/securego/gosec/cmd/gosec/... + go get -u github.com/wadey/gocovmerge + go get -u golang.org/x/tools/cmd/cover + go get -u github.com/golang/lint/golint + go get -u github.com/sectioneight/md-to-godoc + go get -u github.com/securego/gosec/cmd/gosec/... 
+ go get -u honnef.co/go/tools/cmd/gosimple .PHONY: install-ci install-ci: install install-tools @@ -386,4 +393,4 @@ proto-install: ./vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway \ ./vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger # ./vendor/github.com/mwitkow/go-proto-validators/protoc-gen-govalidators \ - # ./vendor/github.com/rakyll/statik \ No newline at end of file + # ./vendor/github.com/rakyll/statik diff --git a/cmd/agent/app/httpserver/thrift-0.9.2/constants.go b/cmd/agent/app/httpserver/thrift-0.9.2/constants.go index 58f8b1ab63f3..44ddd4fd4907 100644 --- a/cmd/agent/app/httpserver/thrift-0.9.2/constants.go +++ b/cmd/agent/app/httpserver/thrift-0.9.2/constants.go @@ -20,6 +20,7 @@ package sampling import ( "bytes" "fmt" + "github.com/apache/thrift/lib/go/thrift" ) diff --git a/cmd/agent/app/httpserver/thrift-0.9.2/ttypes.go b/cmd/agent/app/httpserver/thrift-0.9.2/ttypes.go index c6698a9ccfd5..447b60920e41 100644 --- a/cmd/agent/app/httpserver/thrift-0.9.2/ttypes.go +++ b/cmd/agent/app/httpserver/thrift-0.9.2/ttypes.go @@ -20,6 +20,7 @@ package sampling import ( "bytes" "fmt" + "github.com/apache/thrift/lib/go/thrift" ) diff --git a/cmd/agent/app/servers/server.go b/cmd/agent/app/servers/server.go index a27449cba8da..ee5bd22e0af5 100644 --- a/cmd/agent/app/servers/server.go +++ b/cmd/agent/app/servers/server.go @@ -14,7 +14,9 @@ package servers -import "io" +import ( + "io" +) // Server is the interface for servers that receive inbound span submissions from client. type Server interface { diff --git a/cmd/standalone/Dockerfile b/cmd/all-in-one/Dockerfile similarity index 68% rename from cmd/standalone/Dockerfile rename to cmd/all-in-one/Dockerfile index 7f4788163ab5..271bae939fc6 100644 --- a/cmd/standalone/Dockerfile +++ b/cmd/all-in-one/Dockerfile @@ -18,8 +18,8 @@ EXPOSE 14268 # Web HTTP EXPOSE 16686 -COPY ./cmd/standalone/standalone-linux /go/bin/ -COPY ./cmd/standalone/sampling_strategies.json /etc/jaeger/ +COPY ./cmd/all-in-one/all-in-one-linux /go/bin/ +COPY ./cmd/all-in-one/sampling_strategies.json /etc/jaeger/ -ENTRYPOINT ["/go/bin/standalone-linux"] +ENTRYPOINT ["/go/bin/all-in-one-linux"] CMD ["--sampling.strategies-file=/etc/jaeger/sampling_strategies.json"] diff --git a/cmd/standalone/standalone_test.go b/cmd/all-in-one/all_in_one_test.go similarity index 98% rename from cmd/standalone/standalone_test.go rename to cmd/all-in-one/all_in_one_test.go index e892d05ff6b1..4dcf4956a114 100644 --- a/cmd/standalone/standalone_test.go +++ b/cmd/all-in-one/all_in_one_test.go @@ -51,7 +51,7 @@ var ( } ) -func TestStandalone(t *testing.T) { +func TestAllInOne(t *testing.T) { // Check if the query service is available if err := healthCheck(); err != nil { t.Fatal(err) diff --git a/cmd/standalone/main.go b/cmd/all-in-one/main.go similarity index 92% rename from cmd/standalone/main.go rename to cmd/all-in-one/main.go index 02fa4deecf48..cf2b8d2657a7 100644 --- a/cmd/standalone/main.go +++ b/cmd/all-in-one/main.go @@ -26,10 +26,12 @@ import ( "syscall" "github.com/gorilla/mux" + "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" jaegerClientConfig "github.com/uber/jaeger-client-go/config" + jaegerClientZapLog "github.com/uber/jaeger-client-go/log/zap" "github.com/uber/jaeger-lib/metrics" "github.com/uber/tchannel-go" 
"github.com/uber/tchannel-go/thrift" @@ -53,14 +55,15 @@ import ( "github.com/jaegertracing/jaeger/plugin/storage" "github.com/jaegertracing/jaeger/storage/dependencystore" "github.com/jaegertracing/jaeger/storage/spanstore" + storageMetrics "github.com/jaegertracing/jaeger/storage/spanstore/metrics" jc "github.com/jaegertracing/jaeger/thrift-gen/jaeger" sc "github.com/jaegertracing/jaeger/thrift-gen/sampling" zc "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" ) -// standalone/main is a standalone full-stack jaeger backend, backed by a memory store +// all-in-one/main is a standalone full-stack jaeger backend, backed by a memory store func main() { - var signalsChannel = make(chan os.Signal, 0) + var signalsChannel = make(chan os.Signal) signal.Notify(signalsChannel, os.Interrupt, syscall.SIGTERM) if os.Getenv(storage.SpanStorageTypeEnvVar) == "" { @@ -76,7 +79,7 @@ func main() { } v := viper.New() command := &cobra.Command{ - Use: "jaeger-standalone", + Use: "jaeger-all-in-one", Short: "Jaeger all-in-one distribution with agent, collector and query in one process.", Long: `Jaeger all-in-one distribution with agent, collector and query. Use with caution this version uses only in-memory database.`, @@ -128,18 +131,15 @@ func main() { startCollector(cOpts, spanWriter, logger, metricsFactory, samplingHandler, hc) startQuery(qOpts, spanReader, dependencyReader, logger, metricsFactory, mBldr, hc) hc.Ready() - - select { - case <-signalsChannel: - if closer, ok := spanWriter.(io.Closer); ok { - err := closer.Close() - if err != nil { - logger.Error("Failed to close span writer", zap.Error(err)) - } + <-signalsChannel + logger.Info("Shutting down") + if closer, ok := spanWriter.(io.Closer); ok { + err := closer.Close() + if err != nil { + logger.Error("Failed to close span writer", zap.Error(err)) } - - logger.Info("Jaeger Standalone is finishing") } + logger.Info("Shutdown complete") return nil }, } @@ -277,10 +277,18 @@ func startQuery( Param: 1.0, }, RPCMetrics: true, - }.New("jaeger-query", jaegerClientConfig.Metrics(baseFactory.Namespace("client", nil))) + }.New( + "jaeger-query", + jaegerClientConfig.Metrics(baseFactory.Namespace("client", nil)), + jaegerClientConfig.Logger(jaegerClientZapLog.NewLogger(logger)), + ) if err != nil { logger.Fatal("Failed to initialize tracer", zap.Error(err)) } + opentracing.SetGlobalTracer(tracer) + + spanReader = storageMetrics.NewReadMetricsDecorator(spanReader, baseFactory.Namespace("query", nil)) + apiHandler := queryApp.NewAPIHandler( spanReader, depReader, diff --git a/cmd/standalone/sampling_strategies.json b/cmd/all-in-one/sampling_strategies.json similarity index 100% rename from cmd/standalone/sampling_strategies.json rename to cmd/all-in-one/sampling_strategies.json diff --git a/cmd/collector/app/metrics.go b/cmd/collector/app/metrics.go index cbdf1c8f7ff9..0894529bb673 100644 --- a/cmd/collector/app/metrics.go +++ b/cmd/collector/app/metrics.go @@ -28,7 +28,8 @@ const ( ) // SpanProcessorMetrics contains all the necessary metrics for the SpanProcessor -type SpanProcessorMetrics struct { //TODO - initialize metrics in the traditional factory way. Initialize map afterward. +type SpanProcessorMetrics struct { + //TODO - initialize metrics in the traditional factory way. Initialize map afterward. 
// SaveLatency measures how long the actual save to storage takes SaveLatency metrics.Timer // InQueueLatency measures how long the span spends in the queue @@ -41,10 +42,11 @@ type SpanProcessorMetrics struct { //TODO - initialize metrics in the traditiona QueueLength metrics.Gauge // ErrorBusy counts number of return ErrServerBusy ErrorBusy metrics.Counter - // SavedBySvc contains span and trace counts by service - SavedBySvc metricsBySvc // spans actually saved - serviceNames metrics.Gauge // total number of unique service name metrics reported by this collector - spanCounts map[string]CountsBySpanType + // SavedOkBySvc contains span and trace counts by service + SavedOkBySvc metricsBySvc // spans actually saved + SavedErrBySvc metricsBySvc // spans failed to save + serviceNames metrics.Gauge // total number of unique service name metrics reported by this collector + spanCounts map[string]CountsBySpanType } type countsBySvc struct { @@ -86,7 +88,8 @@ func NewSpanProcessorMetrics(serviceMetrics metrics.Factory, hostMetrics metrics BatchSize: hostMetrics.Gauge("batch-size", nil), QueueLength: hostMetrics.Gauge("queue-length", nil), ErrorBusy: hostMetrics.Counter("error.busy", nil), - SavedBySvc: newMetricsBySvc(serviceMetrics, "saved-by-svc"), + SavedOkBySvc: newMetricsBySvc(serviceMetrics.Namespace("", map[string]string{"result": "ok"}), "saved-by-svc"), + SavedErrBySvc: newMetricsBySvc(serviceMetrics.Namespace("", map[string]string{"result": "err"}), "saved-by-svc"), spanCounts: spanCounts, serviceNames: hostMetrics.Gauge("spans.serviceNames", nil), } diff --git a/cmd/collector/app/model_consumer.go b/cmd/collector/app/model_consumer.go index e335147cc841..be0d0f92aff4 100644 --- a/cmd/collector/app/model_consumer.go +++ b/cmd/collector/app/model_consumer.go @@ -14,7 +14,9 @@ package app -import "github.com/jaegertracing/jaeger/model" +import ( + "github.com/jaegertracing/jaeger/model" +) // ProcessSpan processes a Domain Model Span type ProcessSpan func(span *model.Span) diff --git a/cmd/collector/app/sanitizer/cache/auto_refresh_cache.go b/cmd/collector/app/sanitizer/cache/auto_refresh_cache.go index e943f870e577..546d12238345 100644 --- a/cmd/collector/app/sanitizer/cache/auto_refresh_cache.go +++ b/cmd/collector/app/sanitizer/cache/auto_refresh_cache.go @@ -1,3 +1,4 @@ +// Copyright (c) 2018 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -45,7 +46,7 @@ func NewAutoRefreshCache( readRefreshInterval, saveRefreshInterval time.Duration, ) Cache { return &autoRefreshCache{ - cache: make(map[string]string, 0), + cache: make(map[string]string), extSource: extSource, storage: storage, logger: logger, diff --git a/cmd/collector/app/sanitizer/cache/auto_refresh_cache_test.go b/cmd/collector/app/sanitizer/cache/auto_refresh_cache_test.go index 5e6c152d4f56..d0b64ebb974d 100644 --- a/cmd/collector/app/sanitizer/cache/auto_refresh_cache_test.go +++ b/cmd/collector/app/sanitizer/cache/auto_refresh_cache_test.go @@ -1,3 +1,4 @@ +// Copyright (c) 2018 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); @@ -41,7 +42,7 @@ func getCache(t *testing.T) (*autoRefreshCache, *mocks.ServiceAliasMappingExtern logger := zap.NewNop() return &autoRefreshCache{ - cache: make(map[string]string, 0), + cache: make(map[string]string), extSource: mockExtSource, storage: mockStorage, logger: logger, diff --git a/cmd/collector/app/service_name_normalizer.go b/cmd/collector/app/service_name_normalizer.go index d792ac7a7d8b..f2dc67469105 100644 --- a/cmd/collector/app/service_name_normalizer.go +++ b/cmd/collector/app/service_name_normalizer.go @@ -14,7 +14,9 @@ package app -import "strings" +import ( + "strings" +) // NormalizeServiceName converts service name to a lowercase string that is safe to use in metrics func NormalizeServiceName(serviceName string) string { diff --git a/cmd/collector/app/span_processor.go b/cmd/collector/app/span_processor.go index 2c0da157cddc..faf59b040601 100644 --- a/cmd/collector/app/span_processor.go +++ b/cmd/collector/app/span_processor.go @@ -100,12 +100,13 @@ func (sp *spanProcessor) saveSpan(span *model.Span) { startTime := time.Now() if err := sp.spanWriter.WriteSpan(span); err != nil { sp.logger.Error("Failed to save span", zap.Error(err)) + sp.metrics.SavedErrBySvc.ReportServiceNameForSpan(span) } else { sp.logger.Debug("Span written to the storage by the collector", zap.Stringer("trace-id", span.TraceID), zap.Stringer("span-id", span.SpanID)) - sp.metrics.SavedBySvc.ReportServiceNameForSpan(span) + sp.metrics.SavedOkBySvc.ReportServiceNameForSpan(span) } - sp.metrics.SaveLatency.Record(time.Now().Sub(startTime)) + sp.metrics.SaveLatency.Record(time.Since(startTime)) } func (sp *spanProcessor) ProcessSpans(mSpans []*model.Span, spanFormat string) ([]bool, error) { @@ -124,7 +125,7 @@ func (sp *spanProcessor) ProcessSpans(mSpans []*model.Span, spanFormat string) ( func (sp *spanProcessor) processItemFromQueue(item *queueItem) { sp.processSpan(sp.sanitizer(item.span)) - sp.metrics.InQueueLatency.Record(time.Now().Sub(item.queuedTime)) + sp.metrics.InQueueLatency.Record(time.Since(item.queuedTime)) } func (sp *spanProcessor) enqueueSpan(span *model.Span, originalFormat string) bool { diff --git a/cmd/collector/app/span_processor_test.go b/cmd/collector/app/span_processor_test.go index f00d214339d3..d1700f27f4d8 100644 --- a/cmd/collector/app/span_processor_test.go +++ b/cmd/collector/app/span_processor_test.go @@ -154,11 +154,7 @@ func isSpanAllowed(span *model.Span) bool { return true } - serviceName := span.Process.ServiceName - if serviceName == blackListedService { - return false - } - return true + return span.Process.ServiceName != blackListedService } type fakeSpanWriter struct { @@ -232,8 +228,11 @@ func TestSpanProcessorErrors(t *testing.T) { w := &fakeSpanWriter{ err: fmt.Errorf("some-error"), } + mb := metrics.NewLocalFactory(time.Hour) + serviceMetrics := mb.Namespace("service", nil) p := NewSpanProcessor(w, Options.Logger(logger), + Options.ServiceMetrics(serviceMetrics), ).(*spanProcessor) res, err := p.ProcessSpans([]*model.Span{ @@ -254,6 +253,11 @@ func TestSpanProcessorErrors(t *testing.T) { "msg": "Failed to save span", "error": "some-error", }, logBuf.JSONLine(0)) + + expected := []metricsTest.ExpectedMetric{{ + Name: "service.spans.saved-by-svc|debug=false|result=err|svc=x", Value: 1, + }} + metricsTest.AssertCounterMetrics(t, mb, expected...) 
} type blockingWriter struct { diff --git a/cmd/collector/main.go b/cmd/collector/main.go index 909ea0dac51f..f22021c40ced 100644 --- a/cmd/collector/main.go +++ b/cmd/collector/main.go @@ -55,7 +55,7 @@ import ( const serviceName = "jaeger-collector" func main() { - var signalsChannel = make(chan os.Signal, 0) + var signalsChannel = make(chan os.Signal) signal.Notify(signalsChannel, os.Interrupt, syscall.SIGTERM) storageFactory, err := storage.NewFactory(storage.FactoryConfigFromEnvAndCLI(os.Args, os.Stderr)) @@ -146,7 +146,7 @@ func main() { go startZipkinHTTPAPI(logger, builderOpts.CollectorZipkinHTTPPort, zipkinSpansHandler, recoveryHandler) - logger.Info("Starting Jaeger Collector HTTP server", zap.Int("http-port", builderOpts.CollectorHTTPPort)) + logger.Info("Starting HTTP server", zap.Int("http-port", builderOpts.CollectorHTTPPort)) go func() { if err := http.ListenAndServe(httpPortStr, recoveryHandler(r)); err != nil { @@ -156,17 +156,16 @@ func main() { }() hc.Ready() - select { - case <-signalsChannel: - if closer, ok := spanWriter.(io.Closer); ok { - err := closer.Close() - if err != nil { - logger.Error("Failed to close span writer", zap.Error(err)) - } + <-signalsChannel + logger.Info("Shutting down") + if closer, ok := spanWriter.(io.Closer); ok { + err := closer.Close() + if err != nil { + logger.Error("Failed to close span writer", zap.Error(err)) } - - logger.Info("Jaeger Collector is finishing") } + + logger.Info("Shutdown complete") return nil }, } diff --git a/cmd/ingester/Dockerfile b/cmd/ingester/Dockerfile new file mode 100644 index 000000000000..2cfa7817eaf4 --- /dev/null +++ b/cmd/ingester/Dockerfile @@ -0,0 +1,10 @@ +FROM alpine:latest as certs +RUN apk add --update --no-cache ca-certificates + +FROM scratch + +COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt + +EXPOSE 14267 +COPY ingester-linux /go/bin/ +ENTRYPOINT ["/go/bin/ingester-linux"] diff --git a/cmd/ingester/app/consumer/message.go b/cmd/ingester/app/consumer/message.go index 3ed64cd87a2f..c8075f82c60b 100644 --- a/cmd/ingester/app/consumer/message.go +++ b/cmd/ingester/app/consumer/message.go @@ -14,7 +14,9 @@ package consumer -import "github.com/Shopify/sarama" +import ( + "github.com/Shopify/sarama" +) // Message contains the parts of a sarama ConsumerMessage that we care about. 
type Message interface { diff --git a/cmd/ingester/app/consumer/offset/manager.go b/cmd/ingester/app/consumer/offset/manager.go index ac58508cfd3d..f06966b31ad1 100644 --- a/cmd/ingester/app/consumer/offset/manager.go +++ b/cmd/ingester/app/consumer/offset/manager.go @@ -41,6 +41,7 @@ type Manager struct { markOffsetFunction MarkOffset offsetCommitCount metrics.Counter lastCommittedOffset metrics.Gauge + minOffset int64 list *ConcurrentList close chan struct{} isClosed sync.WaitGroup @@ -57,6 +58,7 @@ func NewManager(minOffset int64, markOffset MarkOffset, partition int32, factory offsetCommitCount: factory.Counter("offset-commits-total", map[string]string{"partition": strconv.Itoa(int(partition))}), lastCommittedOffset: factory.Gauge("last-committed-offset", map[string]string{"partition": strconv.Itoa(int(partition))}), list: newConcurrentList(minOffset), + minOffset: minOffset, } } @@ -69,13 +71,17 @@ func (m *Manager) MarkOffset(offset int64) { func (m *Manager) Start() { m.isClosed.Add(1) go func() { + lastCommittedOffset := m.minOffset for { select { case <-time.After(resetInterval): offset := m.list.setToHighestContiguous() - m.offsetCommitCount.Inc(1) - m.lastCommittedOffset.Update(offset) - m.markOffsetFunction(offset) + if lastCommittedOffset != offset { + m.offsetCommitCount.Inc(1) + m.lastCommittedOffset.Update(offset) + m.markOffsetFunction(offset) + lastCommittedOffset = offset + } case <-m.close: m.isClosed.Done() return diff --git a/cmd/ingester/app/consumer/offset/manager_test.go b/cmd/ingester/app/consumer/offset/manager_test.go index 744b99d35fd0..ea1ac4675158 100644 --- a/cmd/ingester/app/consumer/offset/manager_test.go +++ b/cmd/ingester/app/consumer/offset/manager_test.go @@ -17,6 +17,7 @@ package offset import ( "sync" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/uber/jaeger-lib/metrics" @@ -47,3 +48,16 @@ func TestHandleReset(t *testing.T) { assert.Equal(t, int64(1), cnt["offset-commits-total|partition=1"]) assert.Equal(t, int64(offset), g["last-committed-offset|partition=1"]) } + +func TestCache(t *testing.T) { + offset := int64(1498) + + fakeMarker := func(offset int64) { + assert.Fail(t, "Shouldn't mark cached offset") + } + manager := NewManager(offset, fakeMarker, 1, metrics.NullFactory) + manager.Start() + time.Sleep(resetInterval + 50) + manager.MarkOffset(offset) + manager.Close() +} diff --git a/cmd/ingester/app/consumer/processor_factory_test.go b/cmd/ingester/app/consumer/processor_factory_test.go index e9cf040c4d11..b79f6095f611 100644 --- a/cmd/ingester/app/consumer/processor_factory_test.go +++ b/cmd/ingester/app/consumer/processor_factory_test.go @@ -43,20 +43,27 @@ func Test_new(t *testing.T) { partition := int32(21) offset := int64(555) + sp := &mocks.SpanProcessor{} + sp.On("Process", mock.Anything).Return(nil) + pf := ProcessorFactory{ topic: topic, consumer: mockConsumer, metricsFactory: metrics.NullFactory, logger: zap.NewNop(), - baseProcessor: &mocks.SpanProcessor{}, + baseProcessor: sp, parallelism: 1, } - assert.NotNil(t, pf.new(partition, offset)) + + processor := pf.new(partition, offset) + msg := &kmocks.Message{} + msg.On("Offset").Return(offset + 1) + processor.Process(msg) // This sleep is greater than offset manager's resetInterval to allow it a chance to // call MarkPartitionOffset. 
time.Sleep(150 * time.Millisecond) - mockConsumer.AssertCalled(t, "MarkPartitionOffset", topic, partition, offset, "") + mockConsumer.AssertCalled(t, "MarkPartitionOffset", topic, partition, offset+1, "") } type fakeService struct { diff --git a/cmd/ingester/main.go b/cmd/ingester/main.go index 55b4b46acac2..9ec891bf3c3d 100644 --- a/cmd/ingester/main.go +++ b/cmd/ingester/main.go @@ -37,7 +37,7 @@ import ( ) func main() { - var signalsChannel = make(chan os.Signal, 0) + var signalsChannel = make(chan os.Signal) signal.Notify(signalsChannel, os.Interrupt, syscall.SIGTERM) storageFactory, err := storage.NewFactory(storage.FactoryConfigFromEnvAndCLI(os.Args, os.Stderr)) @@ -47,7 +47,7 @@ func main() { v := viper.New() command := &cobra.Command{ - Use: "jaeger-ingester", + Use: "(experimental) jaeger-ingester", Short: "Jaeger ingester consumes from Kafka and writes to storage", Long: `Jaeger ingester consumes spans from a particular Kafka topic and writes them to all configured storage types.`, RunE: func(cmd *cobra.Command, args []string) error { @@ -91,21 +91,19 @@ func main() { consumer.Start() hc.Ready() - select { - case <-signalsChannel: - logger.Info("Jaeger Ingester is starting to close") - err := consumer.Close() + <-signalsChannel + logger.Info("Shutting down") + err = consumer.Close() + if err != nil { + logger.Error("Failed to close consumer", zap.Error(err)) + } + if closer, ok := spanWriter.(io.Closer); ok { + err := closer.Close() if err != nil { - logger.Error("Failed to close consumer", zap.Error(err)) - } - if closer, ok := spanWriter.(io.Closer); ok { - err := closer.Close() - if err != nil { - logger.Error("Failed to close span writer", zap.Error(err)) - } + logger.Error("Failed to close span writer", zap.Error(err)) } - logger.Info("Jaeger Ingester has finished closing") } + logger.Info("Shutdown complete") return nil }, } diff --git a/cmd/query/app/handler.go b/cmd/query/app/handler.go index 36ad56e5e521..512dcde1a76a 100644 --- a/cmd/query/app/handler.go +++ b/cmd/query/app/handler.go @@ -15,6 +15,7 @@ package app import ( + "context" "encoding/json" "fmt" "net/http" @@ -153,7 +154,7 @@ func (aH *APIHandler) route(route string, args ...interface{}) string { } func (aH *APIHandler) getServices(w http.ResponseWriter, r *http.Request) { - services, err := aH.spanReader.GetServices() + services, err := aH.spanReader.GetServices(r.Context()) if aH.handleError(w, err, http.StatusInternalServerError) { return } @@ -168,7 +169,7 @@ func (aH *APIHandler) getOperationsLegacy(w http.ResponseWriter, r *http.Request vars := mux.Vars(r) // given how getOperationsLegacy is bound to URL route, serviceParam cannot be empty service, _ := url.QueryUnescape(vars[serviceParam]) - operations, err := aH.spanReader.GetOperations(service) + operations, err := aH.spanReader.GetOperations(r.Context(), service) if aH.handleError(w, err, http.StatusInternalServerError) { return } @@ -186,7 +187,7 @@ func (aH *APIHandler) getOperations(w http.ResponseWriter, r *http.Request) { return } } - operations, err := aH.spanReader.GetOperations(service) + operations, err := aH.spanReader.GetOperations(r.Context(), service) if aH.handleError(w, err, http.StatusInternalServerError) { return } @@ -206,12 +207,12 @@ func (aH *APIHandler) search(w http.ResponseWriter, r *http.Request) { var uiErrors []structuredError var tracesFromStorage []*model.Trace if len(tQuery.traceIDs) > 0 { - tracesFromStorage, uiErrors, err = aH.tracesByIDs(tQuery.traceIDs) + tracesFromStorage, uiErrors, err = 
aH.tracesByIDs(r.Context(), tQuery.traceIDs) if aH.handleError(w, err, http.StatusInternalServerError) { return } } else { - tracesFromStorage, err = aH.spanReader.FindTraces(&tQuery.TraceQueryParameters) + tracesFromStorage, err = aH.spanReader.FindTraces(r.Context(), &tQuery.TraceQueryParameters) if aH.handleError(w, err, http.StatusInternalServerError) { return } @@ -233,11 +234,11 @@ func (aH *APIHandler) search(w http.ResponseWriter, r *http.Request) { aH.writeJSON(w, r, &structuredRes) } -func (aH *APIHandler) tracesByIDs(traceIDs []model.TraceID) ([]*model.Trace, []structuredError, error) { +func (aH *APIHandler) tracesByIDs(ctx context.Context, traceIDs []model.TraceID) ([]*model.Trace, []structuredError, error) { var errors []structuredError retMe := make([]*model.Trace, 0, len(traceIDs)) for _, traceID := range traceIDs { - if trace, err := trace(traceID, aH.spanReader, aH.archiveSpanReader); err != nil { + if trace, err := trace(ctx, traceID, aH.spanReader, aH.archiveSpanReader); err != nil { if err != spanstore.ErrTraceNotFound { return nil, nil, err } @@ -399,7 +400,7 @@ func (aH *APIHandler) withTraceFromReader( if !ok { return } - trace, err := trace(traceID, reader, backupReader) + trace, err := trace(r.Context(), traceID, reader, backupReader) if err == spanstore.ErrTraceNotFound { aH.handleError(w, err, http.StatusNotFound) return @@ -411,16 +412,17 @@ func (aH *APIHandler) withTraceFromReader( } func trace( + ctx context.Context, traceID model.TraceID, reader spanstore.Reader, backupReader spanstore.Reader, ) (*model.Trace, error) { - trace, err := reader.GetTrace(traceID) + trace, err := reader.GetTrace(ctx, traceID) if err == spanstore.ErrTraceNotFound { if backupReader == nil { return nil, err } - trace, err = backupReader.GetTrace(traceID) + trace, err = backupReader.GetTrace(ctx, traceID) } return trace, err } diff --git a/cmd/query/main.go b/cmd/query/main.go index b735a58570d6..46bb18caedb0 100644 --- a/cmd/query/main.go +++ b/cmd/query/main.go @@ -24,9 +24,11 @@ import ( "syscall" "github.com/gorilla/handlers" + "github.com/opentracing/opentracing-go" "github.com/spf13/cobra" "github.com/spf13/viper" jaegerClientConfig "github.com/uber/jaeger-client-go/config" + jaegerClientZapLog "github.com/uber/jaeger-client-go/log/zap" "go.uber.org/zap" "github.com/jaegertracing/jaeger/cmd/env" @@ -39,10 +41,11 @@ import ( "github.com/jaegertracing/jaeger/pkg/version" "github.com/jaegertracing/jaeger/plugin/storage" istorage "github.com/jaegertracing/jaeger/storage" + storageMetrics "github.com/jaegertracing/jaeger/storage/spanstore/metrics" ) func main() { - var serverChannel = make(chan os.Signal, 0) + var serverChannel = make(chan os.Signal) signal.Notify(serverChannel, os.Interrupt, syscall.SIGTERM) storageFactory, err := storage.NewFactory(storage.FactoryConfigFromEnvAndCLI(os.Args, os.Stderr)) @@ -86,11 +89,16 @@ func main() { Param: 1.0, }, RPCMetrics: true, - }.New("jaeger-query", jaegerClientConfig.Metrics(baseFactory.Namespace("client", nil))) + }.New( + "jaeger-query", + jaegerClientConfig.Metrics(baseFactory.Namespace("client", nil)), + jaegerClientConfig.Logger(jaegerClientZapLog.NewLogger(logger)), + ) if err != nil { logger.Fatal("Failed to initialize tracer", zap.Error(err)) } defer closer.Close() + opentracing.SetGlobalTracer(tracer) storageFactory.InitFromViper(v) if err := storageFactory.Initialize(baseFactory, logger); err != nil { @@ -100,6 +108,7 @@ func main() { if 
err != nil { logger.Fatal("Failed to create span reader", zap.Error(err)) } + spanReader = storageMetrics.NewReadMetricsDecorator(spanReader, baseFactory.Namespace("query", nil)) dependencyReader, err := storageFactory.CreateDependencyReader() if err != nil { logger.Fatal("Failed to create dependency reader", zap.Error(err)) @@ -131,7 +140,7 @@ func main() { recoveryHandler := recoveryhandler.NewRecoveryHandler(logger, true) go func() { - logger.Info("Starting jaeger-query HTTP server", zap.Int("port", queryOpts.Port)) + logger.Info("Starting HTTP server", zap.Int("port", queryOpts.Port)) if err := http.ListenAndServe(portStr, recoveryHandler(compressHandler)); err != nil { logger.Fatal("Could not launch service", zap.Error(err)) } @@ -139,11 +148,8 @@ func main() { }() hc.Ready() - - select { - case <-serverChannel: - logger.Info("Jaeger Query is finishing") - } + <-serverChannel + logger.Info("Shutdown complete") return nil }, } diff --git a/crossdock/services/mocks/AgentService.go b/crossdock/services/mocks/AgentService.go index 69f226344077..ee1cc207d5dc 100644 --- a/crossdock/services/mocks/AgentService.go +++ b/crossdock/services/mocks/AgentService.go @@ -14,7 +14,9 @@ package mocks -import "github.com/stretchr/testify/mock" +import ( + "github.com/stretchr/testify/mock" +) // AgentService is an autogenerated mock type for the AgentService type type AgentService struct { diff --git a/crossdock/services/mocks/QueryService.go b/crossdock/services/mocks/QueryService.go index 0f95b1fba3b1..774f46c20d7c 100644 --- a/crossdock/services/mocks/QueryService.go +++ b/crossdock/services/mocks/QueryService.go @@ -14,9 +14,11 @@ package mocks -import "github.com/stretchr/testify/mock" +import ( + "github.com/stretchr/testify/mock" -import ui "github.com/jaegertracing/jaeger/model/json" + ui "github.com/jaegertracing/jaeger/model/json" +) // QueryService is an autogenerated mock type for the QueryService type type QueryService struct { diff --git a/examples/hotrod/cmd/all.go b/examples/hotrod/cmd/all.go index b6aea3088102..ca4f32e82bf9 100644 --- a/examples/hotrod/cmd/all.go +++ b/examples/hotrod/cmd/all.go @@ -14,7 +14,9 @@ package cmd -import "github.com/spf13/cobra" +import ( + "github.com/spf13/cobra" +) // allCmd represents the all command var allCmd = &cobra.Command{ diff --git a/examples/hotrod/pkg/httperr/httperr.go b/examples/hotrod/pkg/httperr/httperr.go index 9351eea772b7..2f92cd936367 100644 --- a/examples/hotrod/pkg/httperr/httperr.go +++ b/examples/hotrod/pkg/httperr/httperr.go @@ -14,7 +14,9 @@ package httperr -import "net/http" +import ( + "net/http" +) // HandleError checks if the error is not nil, writes it to the output // with the specified status code, and returns true. If error is nil it returns false. diff --git a/examples/hotrod/services/customer/interface.go b/examples/hotrod/services/customer/interface.go index 976adc9ac25b..f8bc49a8de31 100644 --- a/examples/hotrod/services/customer/interface.go +++ b/examples/hotrod/services/customer/interface.go @@ -14,7 +14,9 @@ package customer -import "context" +import ( + "context" +) // Customer contains data about a customer. 
type Customer struct { diff --git a/examples/hotrod/services/driver/interface.go b/examples/hotrod/services/driver/interface.go index d20941f4f5e8..e6487c4c8510 100644 --- a/examples/hotrod/services/driver/interface.go +++ b/examples/hotrod/services/driver/interface.go @@ -14,7 +14,9 @@ package driver -import "context" +import ( + "context" +) // Driver describes a driver and the currentl car location. type Driver struct { diff --git a/examples/hotrod/services/route/interface.go b/examples/hotrod/services/route/interface.go index f65addce24d1..46ef091c9a45 100644 --- a/examples/hotrod/services/route/interface.go +++ b/examples/hotrod/services/route/interface.go @@ -14,8 +14,10 @@ package route -import "context" -import "time" +import ( + "context" + "time" +) // Route describes a route between Pickup and Dropoff locations and expected time to arrival. type Route struct { diff --git a/jaeger-ui b/jaeger-ui index 7896416c44d1..d148dc57488f 160000 --- a/jaeger-ui +++ b/jaeger-ui @@ -1 +1 @@ -Subproject commit 7896416c44d1e4f1d452a7b67dcc43c9d5a2b853 +Subproject commit d148dc57488f211e94a7216db0f29da42fa94ddf diff --git a/model/converter/json/domain_span_compare_test.go b/model/converter/json/domain_span_compare_test.go deleted file mode 100644 index 33a6a6d00bc7..000000000000 --- a/model/converter/json/domain_span_compare_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package json - -import ( - "encoding/json" - "testing" - - "github.com/kr/pretty" - "github.com/stretchr/testify/assert" - - "github.com/jaegertracing/jaeger/model" -) - -func CompareModelSpans(t *testing.T, expected *model.Span, actual *model.Span) { - model.SortSpan(expected) - model.SortSpan(actual) - - if !assert.EqualValues(t, expected, actual) { - for _, err := range pretty.Diff(expected, actual) { - t.Log(err) - } - out, err := json.Marshal(actual) - assert.NoError(t, err) - t.Logf("Actual trace: %s", string(out)) - } -} diff --git a/model/converter/json/from_domain_test.go b/model/converter/json/from_domain_test.go index 1588fbdb0c38..6e2b85ffd982 100644 --- a/model/converter/json/from_domain_test.go +++ b/model/converter/json/from_domain_test.go @@ -137,7 +137,7 @@ func testJSONEncoding(t *testing.T, i int, expectedStr []byte, object interface{ } require.NoError(t, enc.Encode(object)) - if !assert.Equal(t, string(expectedStr), string(buf.Bytes())) { + if !assert.Equal(t, string(expectedStr), buf.String()) { err := ioutil.WriteFile(outFile+"-actual.json", buf.Bytes(), 0644) assert.NoError(t, err) } diff --git a/model/converter/thrift/jaeger/to_domain.go b/model/converter/thrift/jaeger/to_domain.go index a56e606431eb..d3652f9bfc83 100644 --- a/model/converter/thrift/jaeger/to_domain.go +++ b/model/converter/thrift/jaeger/to_domain.go @@ -35,7 +35,6 @@ func ToDomainSpan(jSpan *jaeger.Span, jProcess *jaeger.Process) *model.Span { return toDomain{}.ToDomainSpan(jSpan, jProcess) } -// toDomain is a private struct that namespaces some conversion functions. It has access to its own private utility functions type toDomain struct{} func (td toDomain) ToDomain(jSpans []*jaeger.Span, jProcess *jaeger.Process) []*model.Span { diff --git a/model/converter/thrift/zipkin/process_hashtable.go b/model/converter/thrift/zipkin/process_hashtable.go index ce4ec0583a77..7e1208f61170 100644 --- a/model/converter/thrift/zipkin/process_hashtable.go +++ b/model/converter/thrift/zipkin/process_hashtable.go @@ -14,7 +14,9 @@ package zipkin -import "github.com/jaegertracing/jaeger/model" +import ( + "github.com/jaegertracing/jaeger/model" +) type processHashtable struct { processes map[uint64][]*model.Process diff --git a/model/converter/thrift/zipkin/to_domain.go b/model/converter/thrift/zipkin/to_domain.go index 54e0023600d9..254115bc8d84 100644 --- a/model/converter/thrift/zipkin/to_domain.go +++ b/model/converter/thrift/zipkin/to_domain.go @@ -279,7 +279,7 @@ func (td toDomain) getTags(binAnnotations []*zipkincore.BinaryAnnotation, tagInc func (td toDomain) transformBinaryAnnotation(binaryAnnotation *zipkincore.BinaryAnnotation) (model.KeyValue, error) { switch binaryAnnotation.AnnotationType { case zipkincore.AnnotationType_BOOL: - vBool := bytes.Compare(binaryAnnotation.Value, trueByteSlice) == 0 + vBool := bytes.Equal(binaryAnnotation.Value, trueByteSlice) return model.Bool(binaryAnnotation.Key, vBool), nil case zipkincore.AnnotationType_BYTES: return model.Binary(binaryAnnotation.Key, binaryAnnotation.Value), nil diff --git a/model/process.go b/model/process.go index 6b9e1126b2fc..145dc9298ea5 100644 --- a/model/process.go +++ b/model/process.go @@ -14,7 +14,9 @@ package model -import "io" +import ( + "io" +) // NewProcess creates a new Process for given serviceName and tags. 
// The tags are sorted in place and kept in the the same array/slice, diff --git a/model/time.go b/model/time.go index 15f28a9ce908..b3c841b698f7 100644 --- a/model/time.go +++ b/model/time.go @@ -14,7 +14,9 @@ package model -import "time" +import ( + "time" +) // EpochMicrosecondsAsTime converts microseconds since epoch to time.Time value. func EpochMicrosecondsAsTime(ts uint64) time.Time { diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index 931f3353f6f3..3dcf5d98987f 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -14,7 +14,9 @@ package cache -import "time" +import ( + "time" +) // A Cache is a generalized interface to a cache. See cache.LRU for a specific // implementation (bounded cache with LRU eviction) diff --git a/pkg/distributedlock/interface.go b/pkg/distributedlock/interface.go index 50c482dd6a6b..fa3e4e773da4 100644 --- a/pkg/distributedlock/interface.go +++ b/pkg/distributedlock/interface.go @@ -14,7 +14,9 @@ package distributedlock -import "time" +import ( + "time" +) // Lock uses distributed lock for control of a resource. type Lock interface { diff --git a/pkg/es/config/config.go b/pkg/es/config/config.go index 7305bc312ca7..6122c1a71816 100644 --- a/pkg/es/config/config.go +++ b/pkg/es/config/config.go @@ -42,6 +42,9 @@ type Configuration struct { BulkActions int BulkFlushInterval time.Duration IndexPrefix string + TagsFilePath string + AllTagsAsFields bool + TagDotReplacement string } // ClientBuilder creates new es.Client @@ -51,6 +54,9 @@ type ClientBuilder interface { GetNumReplicas() int64 GetMaxSpanAge() time.Duration GetIndexPrefix() string + GetTagsFilePath() string + GetAllTagsAsFields() bool + GetTagDotReplacement() string } // NewClient creates a new ElasticSearch client @@ -119,7 +125,7 @@ func (c *Configuration) ApplyDefaults(source *Configuration) { if c.Password == "" { c.Password = source.Password } - if c.Sniffer == false { + if !c.Sniffer { c.Sniffer = source.Sniffer } if c.MaxSpanAge == 0 { @@ -165,6 +171,22 @@ func (c *Configuration) GetIndexPrefix() string { return c.IndexPrefix } +// GetTagsFilePath returns a path to file containing tag keys +func (c *Configuration) GetTagsFilePath() string { + return c.TagsFilePath +} + +// GetAllTagsAsFields returns true if all tags should be stored as object fields +func (c *Configuration) GetAllTagsAsFields() bool { + return c.AllTagsAsFields +} + +// GetTagDotReplacement returns character is used to replace dots in tag keys, when +// the tag is stored as object field. +func (c *Configuration) GetTagDotReplacement() string { + return c.TagDotReplacement +} + // GetConfigs wraps the configs to feed to the ElasticSearch client init func (c *Configuration) GetConfigs() []elastic.ClientOptionFunc { options := make([]elastic.ClientOptionFunc, 3) diff --git a/pkg/recoveryhandler/zap.go b/pkg/recoveryhandler/zap.go index 5033955564e3..d1cd3f3458e3 100644 --- a/pkg/recoveryhandler/zap.go +++ b/pkg/recoveryhandler/zap.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2017-2018 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -28,9 +28,8 @@ type zapRecoveryWrapper struct { } // Println logs an error message with the given fields -func (z zapRecoveryWrapper) Println(fields ...interface{}) { - // if you think i'm going to check the type of each of the fields and then logger with fields, you're crazy. 
- z.logger.Error(fmt.Sprintln(fields)) +func (z zapRecoveryWrapper) Println(args ...interface{}) { + z.logger.Error(fmt.Sprint(args...)) } // NewRecoveryHandler returns an http.Handler that recovers on panics diff --git a/pkg/recoveryhandler/zap_test.go b/pkg/recoveryhandler/zap_test.go index 847390d43de6..0957865f3559 100644 --- a/pkg/recoveryhandler/zap_test.go +++ b/pkg/recoveryhandler/zap_test.go @@ -40,6 +40,6 @@ func TestNewRecoveryHandler(t *testing.T) { assert.Equal(t, http.StatusInternalServerError, res.Code) assert.Equal(t, map[string]string{ "level": "error", - "msg": "[Unexpected error!]\n", + "msg": "Unexpected error!", }, log.JSONLine(0)) } diff --git a/pkg/testutils/logger_test.go b/pkg/testutils/logger_test.go index fab8c5136bb8..402bdfe53309 100644 --- a/pkg/testutils/logger_test.go +++ b/pkg/testutils/logger_test.go @@ -50,13 +50,13 @@ func TestRaceCondition(t *testing.T) { finish.Add(2) go func() { - _ = <-start + <-start logger.Info("test") finish.Done() }() go func() { - _ = <-start + <-start buffer.Lines() buffer.Stripped() _ = buffer.String() diff --git a/plugin/storage/cassandra/samplingstore/storage_test.go b/plugin/storage/cassandra/samplingstore/storage_test.go index c71a619a917e..fc55d772b8aa 100644 --- a/plugin/storage/cassandra/samplingstore/storage_test.go +++ b/plugin/storage/cassandra/samplingstore/storage_test.go @@ -420,8 +420,22 @@ func TestStringToProbabilitiesAndQPS(t *testing.T) { probabilities := s.stringToProbabilitiesAndQPS(testStr) assert.Len(t, probabilities, 2) - assert.Equal(t, map[string]*model.ProbabilityAndQPS{"GET": {0.001, 63.2}, "PUT": {0.002, 0.0}}, probabilities["svc1"]) - assert.Equal(t, map[string]*model.ProbabilityAndQPS{"GET": {0.5, 34.2}}, probabilities["svc2"]) + assert.Equal(t, map[string]*model.ProbabilityAndQPS{ + "GET": { + Probability: 0.001, + QPS: 63.2, + }, + "PUT": { + Probability: 0.002, + QPS: 0.0, + }, + }, probabilities["svc1"]) + assert.Equal(t, map[string]*model.ProbabilityAndQPS{ + "GET": { + Probability: 0.5, + QPS: 34.2, + }, + }, probabilities["svc2"]) } func TestStringToProbabilities(t *testing.T) { diff --git a/plugin/storage/cassandra/savetracetest/main.go b/plugin/storage/cassandra/savetracetest/main.go index 5c6805daf849..73c1ffe8d59a 100644 --- a/plugin/storage/cassandra/savetracetest/main.go +++ b/plugin/storage/cassandra/savetracetest/main.go @@ -15,6 +15,7 @@ package main import ( + "context" "time" "github.com/uber/jaeger-lib/metrics" @@ -50,7 +51,8 @@ func main() { logger.Info("Saved span", zap.String("spanID", getSomeSpan().SpanID.String())) } s := getSomeSpan() - trace, err := spanReader.GetTrace(s.TraceID) + ctx := context.Background() + trace, err := spanReader.GetTrace(ctx, s.TraceID) if err != nil { logger.Fatal("Failed to read", zap.Error(err)) } else { @@ -63,27 +65,27 @@ func main() { StartTimeMax: time.Now().Add(time.Hour), } logger.Info("Check main query") - queryAndPrint(spanReader, tqp) + queryAndPrint(ctx, spanReader, tqp) tqp.OperationName = "opName" logger.Info("Check query with operation") - queryAndPrint(spanReader, tqp) + queryAndPrint(ctx, spanReader, tqp) tqp.Tags = map[string]string{ "someKey": "someVal", } logger.Info("Check query with operation name and tags") - queryAndPrint(spanReader, tqp) + queryAndPrint(ctx, spanReader, tqp) tqp.DurationMin = 0 tqp.DurationMax = time.Hour tqp.Tags = map[string]string{} logger.Info("check query with duration") - queryAndPrint(spanReader, tqp) + queryAndPrint(ctx, spanReader, tqp) } -func queryAndPrint(spanReader 
*cSpanStore.SpanReader, tqp *spanstore.TraceQueryParameters) { - traces, err := spanReader.FindTraces(tqp) +func queryAndPrint(ctx context.Context, spanReader *cSpanStore.SpanReader, tqp *spanstore.TraceQueryParameters) { + traces, err := spanReader.FindTraces(ctx, tqp) if err != nil { logger.Fatal("Failed to query", zap.Error(err)) } else { diff --git a/plugin/storage/cassandra/spanstore/dbmodel/log_fields_filter.go b/plugin/storage/cassandra/spanstore/dbmodel/log_fields_filter.go index 8bfd11c431b8..fec9f957345a 100644 --- a/plugin/storage/cassandra/spanstore/dbmodel/log_fields_filter.go +++ b/plugin/storage/cassandra/spanstore/dbmodel/log_fields_filter.go @@ -14,7 +14,9 @@ package dbmodel -import "github.com/jaegertracing/jaeger/model" +import ( + "github.com/jaegertracing/jaeger/model" +) // LogFieldsFilter filters all span.Logs.Fields. type LogFieldsFilter struct { diff --git a/plugin/storage/cassandra/spanstore/dbmodel/unique_tags.go b/plugin/storage/cassandra/spanstore/dbmodel/unique_tags.go index 1f95ac5c5319..ccdc9a4de8b2 100644 --- a/plugin/storage/cassandra/spanstore/dbmodel/unique_tags.go +++ b/plugin/storage/cassandra/spanstore/dbmodel/unique_tags.go @@ -14,7 +14,9 @@ package dbmodel -import "github.com/jaegertracing/jaeger/model" +import ( + "github.com/jaegertracing/jaeger/model" +) // GetAllUniqueTags creates a list of all unique tags from a set of filtered tags. func GetAllUniqueTags(span *model.Span, tagFilter TagFilter) []TagInsertion { diff --git a/plugin/storage/cassandra/spanstore/reader.go b/plugin/storage/cassandra/spanstore/reader.go index 26b62d3c4eb4..7b751c51ca0f 100644 --- a/plugin/storage/cassandra/spanstore/reader.go +++ b/plugin/storage/cassandra/spanstore/reader.go @@ -15,8 +15,12 @@ package spanstore import ( + "context" "time" + "github.com/opentracing/opentracing-go" + ottag "github.com/opentracing/opentracing-go/ext" + otlog "github.com/opentracing/opentracing-go/log" "github.com/pkg/errors" "github.com/uber/jaeger-lib/metrics" "go.uber.org/zap" @@ -132,17 +136,27 @@ func NewSpanReader( } // GetServices returns all services traced by Jaeger -func (s *SpanReader) GetServices() ([]string, error) { +func (s *SpanReader) GetServices(ctx context.Context) ([]string, error) { return s.serviceNamesReader() } // GetOperations returns all operations for a specific service traced by Jaeger -func (s *SpanReader) GetOperations(service string) ([]string, error) { +func (s *SpanReader) GetOperations(ctx context.Context, service string) ([]string, error) { return s.operationNamesReader(service) } -func (s *SpanReader) readTrace(traceID dbmodel.TraceID) (*model.Trace, error) { +func (s *SpanReader) readTrace(ctx context.Context, traceID dbmodel.TraceID) (*model.Trace, error) { + span, ctx := startSpanForQuery(ctx, "readTrace", querySpanByTraceID) + defer span.Finish() + span.LogFields(otlog.String("event", "searching"), otlog.Object("trace_id", traceID)) + + trace, err := s.readTraceInSpan(ctx, traceID) + logErrorToSpan(span, err) + return trace, err +} + +func (s *SpanReader) readTraceInSpan(ctx context.Context, traceID dbmodel.TraceID) (*model.Trace, error) { start := time.Now() q := s.session.Query(querySpanByTraceID, traceID) i := q.Iter() @@ -172,7 +186,6 @@ func (s *SpanReader) readTrace(traceID dbmodel.TraceID) (*model.Trace, error) { } span, err := dbmodel.ToDomain(&dbSpan) if err != nil { - //do we consider conversion failure to cause such metrics to be emitted? for now i'm assuming yes. 
s.metrics.readTraces.Emit(err, time.Since(start)) return nil, err } @@ -191,8 +204,8 @@ func (s *SpanReader) readTrace(traceID dbmodel.TraceID) (*model.Trace, error) { } // GetTrace takes a traceID and returns a Trace associated with that traceID -func (s *SpanReader) GetTrace(traceID model.TraceID) (*model.Trace, error) { - return s.readTrace(dbmodel.TraceIDFromDomain(traceID)) +func (s *SpanReader) GetTrace(ctx context.Context, traceID model.TraceID) (*model.Trace, error) { + return s.readTrace(ctx, dbmodel.TraceIDFromDomain(traceID)) } func validateQuery(p *spanstore.TraceQueryParameters) error { @@ -218,14 +231,14 @@ func validateQuery(p *spanstore.TraceQueryParameters) error { } // FindTraces retrieves traces that match the traceQuery -func (s *SpanReader) FindTraces(traceQuery *spanstore.TraceQueryParameters) ([]*model.Trace, error) { +func (s *SpanReader) FindTraces(ctx context.Context, traceQuery *spanstore.TraceQueryParameters) ([]*model.Trace, error) { if err := validateQuery(traceQuery); err != nil { return nil, err } if traceQuery.NumTraces == 0 { traceQuery.NumTraces = defaultNumTraces } - uniqueTraceIDs, err := s.findTraceIDs(traceQuery) + uniqueTraceIDs, err := s.findTraceIDs(ctx, traceQuery) if err != nil { return nil, err } @@ -234,7 +247,7 @@ func (s *SpanReader) FindTraces(traceQuery *spanstore.TraceQueryParameters) ([]* if len(retMe) >= traceQuery.NumTraces { break } - jTrace, err := s.readTrace(traceID) + jTrace, err := s.readTrace(ctx, traceID) if err != nil { s.logger.Error("Failure to read trace", zap.String("trace_id", traceID.String()), zap.Error(err)) continue @@ -244,18 +257,18 @@ func (s *SpanReader) FindTraces(traceQuery *spanstore.TraceQueryParameters) ([]* return retMe, nil } -func (s *SpanReader) findTraceIDs(traceQuery *spanstore.TraceQueryParameters) (dbmodel.UniqueTraceIDs, error) { +func (s *SpanReader) findTraceIDs(ctx context.Context, traceQuery *spanstore.TraceQueryParameters) (dbmodel.UniqueTraceIDs, error) { if traceQuery.DurationMin != 0 || traceQuery.DurationMax != 0 { - return s.queryByDuration(traceQuery) + return s.queryByDuration(ctx, traceQuery) } if traceQuery.OperationName != "" { - traceIds, err := s.queryByServiceNameAndOperation(traceQuery) + traceIds, err := s.queryByServiceNameAndOperation(ctx, traceQuery) if err != nil { return nil, err } if len(traceQuery.Tags) > 0 { - tagTraceIds, err := s.queryByTagsAndLogs(traceQuery) + tagTraceIds, err := s.queryByTagsAndLogs(ctx, traceQuery) if err != nil { return nil, err } @@ -267,14 +280,19 @@ func (s *SpanReader) findTraceIDs(traceQuery *spanstore.TraceQueryParameters) (d return traceIds, nil } if len(traceQuery.Tags) > 0 { - return s.queryByTagsAndLogs(traceQuery) + return s.queryByTagsAndLogs(ctx, traceQuery) } - return s.queryByService(traceQuery) + return s.queryByService(ctx, traceQuery) } -func (s *SpanReader) queryByTagsAndLogs(tq *spanstore.TraceQueryParameters) (dbmodel.UniqueTraceIDs, error) { +func (s *SpanReader) queryByTagsAndLogs(ctx context.Context, tq *spanstore.TraceQueryParameters) (dbmodel.UniqueTraceIDs, error) { + span, ctx := startSpanForQuery(ctx, "queryByTagsAndLogs", queryByTag) + defer span.Finish() + results := make([]dbmodel.UniqueTraceIDs, 0, len(tq.Tags)) for k, v := range tq.Tags { + childSpan := opentracing.StartSpan("queryByTag") + childSpan.LogFields(otlog.String("tag.key", k), otlog.String("tag.value", v)) query := s.session.Query( queryByTag, tq.ServiceName, @@ -284,7 +302,8 @@ func (s *SpanReader) queryByTagsAndLogs(tq *spanstore.TraceQueryParameters) 
(dbm model.TimeAsEpochMicroseconds(tq.StartTimeMax), tq.NumTraces*limitMultiple, ).PageSize(0) - t, err := s.executeQuery(query, s.metrics.queryTagIndex) + t, err := s.executeQuery(childSpan, query, s.metrics.queryTagIndex) + childSpan.Finish() if err != nil { return nil, err } @@ -293,7 +312,10 @@ func (s *SpanReader) queryByTagsAndLogs(tq *spanstore.TraceQueryParameters) (dbm return dbmodel.IntersectTraceIDs(results), nil } -func (s *SpanReader) queryByDuration(traceQuery *spanstore.TraceQueryParameters) (dbmodel.UniqueTraceIDs, error) { +func (s *SpanReader) queryByDuration(ctx context.Context, traceQuery *spanstore.TraceQueryParameters) (dbmodel.UniqueTraceIDs, error) { + span, ctx := startSpanForQuery(ctx, "queryByDuration", queryByDuration) + defer span.Finish() + results := dbmodel.UniqueTraceIDs{} minDurationMicros := traceQuery.DurationMin.Nanoseconds() / int64(time.Microsecond/time.Nanosecond) @@ -308,6 +330,8 @@ func (s *SpanReader) queryByDuration(traceQuery *spanstore.TraceQueryParameters) endTimeByHour := traceQuery.StartTimeMax.Round(durationBucketSize) for timeBucket := endTimeByHour; timeBucket.After(startTimeByHour) || timeBucket.Equal(startTimeByHour); timeBucket = timeBucket.Add(-1 * durationBucketSize) { + childSpan := opentracing.StartSpan("queryForTimeBucket") + childSpan.LogFields(otlog.String("timeBucket", timeBucket.String())) query := s.session.Query( queryByDuration, timeBucket, @@ -316,7 +340,8 @@ func (s *SpanReader) queryByDuration(traceQuery *spanstore.TraceQueryParameters) minDurationMicros, maxDurationMicros, traceQuery.NumTraces*limitMultiple) - t, err := s.executeQuery(query, s.metrics.queryDurationIndex) + t, err := s.executeQuery(childSpan, query, s.metrics.queryDurationIndex) + childSpan.Finish() if err != nil { return nil, err } @@ -331,7 +356,9 @@ func (s *SpanReader) queryByDuration(traceQuery *spanstore.TraceQueryParameters) return results, nil } -func (s *SpanReader) queryByServiceNameAndOperation(tq *spanstore.TraceQueryParameters) (dbmodel.UniqueTraceIDs, error) { +func (s *SpanReader) queryByServiceNameAndOperation(ctx context.Context, tq *spanstore.TraceQueryParameters) (dbmodel.UniqueTraceIDs, error) { + span, ctx := startSpanForQuery(ctx, "queryByServiceNameAndOperation", queryByServiceAndOperationName) + defer span.Finish() query := s.session.Query( queryByServiceAndOperationName, tq.ServiceName, @@ -340,10 +367,12 @@ func (s *SpanReader) queryByServiceNameAndOperation(tq *spanstore.TraceQueryPara model.TimeAsEpochMicroseconds(tq.StartTimeMax), tq.NumTraces*limitMultiple, ).PageSize(0) - return s.executeQuery(query, s.metrics.queryServiceOperationIndex) + return s.executeQuery(span, query, s.metrics.queryServiceOperationIndex) } -func (s *SpanReader) queryByService(tq *spanstore.TraceQueryParameters) (dbmodel.UniqueTraceIDs, error) { +func (s *SpanReader) queryByService(ctx context.Context, tq *spanstore.TraceQueryParameters) (dbmodel.UniqueTraceIDs, error) { + span, ctx := startSpanForQuery(ctx, "queryByService", queryByServiceName) + defer span.Finish() query := s.session.Query( queryByServiceName, tq.ServiceName, @@ -351,10 +380,10 @@ func (s *SpanReader) queryByService(tq *spanstore.TraceQueryParameters) (dbmodel model.TimeAsEpochMicroseconds(tq.StartTimeMax), tq.NumTraces*limitMultiple, ).PageSize(0) - return s.executeQuery(query, s.metrics.queryServiceNameIndex) + return s.executeQuery(span, query, s.metrics.queryServiceNameIndex) } -func (s *SpanReader) executeQuery(query cassandra.Query, tableMetrics *casMetrics.Table) 
(dbmodel.UniqueTraceIDs, error) { +func (s *SpanReader) executeQuery(span opentracing.Span, query cassandra.Query, tableMetrics *casMetrics.Table) (dbmodel.UniqueTraceIDs, error) { start := time.Now() i := query.Iter() retMe := dbmodel.UniqueTraceIDs{} @@ -365,8 +394,25 @@ func (s *SpanReader) executeQuery(query cassandra.Query, tableMetrics *casMetric err := i.Close() tableMetrics.Emit(err, time.Since(start)) if err != nil { + logErrorToSpan(span, err) s.logger.Error("Failed to exec query", zap.Error(err)) return nil, err } return retMe, nil } + +func startSpanForQuery(ctx context.Context, name, query string) (opentracing.Span, context.Context) { + span, ctx := opentracing.StartSpanFromContext(ctx, name) + ottag.DBStatement.Set(span, query) + ottag.DBType.Set(span, "cassandra") + ottag.Component.Set(span, "gocql") + return span, ctx +} + +func logErrorToSpan(span opentracing.Span, err error) { + if err == nil { + return + } + ottag.Error.Set(span, true) + span.LogFields(otlog.Error(err)) +} diff --git a/plugin/storage/cassandra/spanstore/reader_test.go b/plugin/storage/cassandra/spanstore/reader_test.go index 10265311f323..8499aa6800ac 100644 --- a/plugin/storage/cassandra/spanstore/reader_test.go +++ b/plugin/storage/cassandra/spanstore/reader_test.go @@ -15,6 +15,7 @@ package spanstore import ( + "context" "errors" "strings" "testing" @@ -59,7 +60,7 @@ var _ spanstore.Reader = &SpanReader{} // check API conformance func TestSpanReaderGetServices(t *testing.T) { withSpanReader(func(r *spanReaderTest) { r.reader.serviceNamesReader = func() ([]string, error) { return []string{"service-a"}, nil } - s, err := r.reader.GetServices() + s, err := r.reader.GetServices(context.Background()) assert.NoError(t, err) assert.Equal(t, []string{"service-a"}, s) }) @@ -68,7 +69,7 @@ func TestSpanReaderGetServices(t *testing.T) { func TestSpanReaderGetOperations(t *testing.T) { withSpanReader(func(r *spanReaderTest) { r.reader.operationNamesReader = func(string) ([]string, error) { return []string{"operation-a"}, nil } - s, err := r.reader.GetOperations("service-x") + s, err := r.reader.GetOperations(context.Background(), "service-x") assert.NoError(t, err) assert.Equal(t, []string{"operation-a"}, s) }) @@ -117,7 +118,7 @@ func TestSpanReaderGetTrace(t *testing.T) { r.session.On("Query", mock.AnythingOfType("string"), matchEverything()).Return(query) - trace, err := r.reader.GetTrace(model.TraceID{}) + trace, err := r.reader.GetTrace(context.Background(), model.TraceID{}) if testCase.expectedErr == "" { assert.NoError(t, err) assert.NotNil(t, trace) @@ -143,7 +144,7 @@ func TestSpanReaderGetTrace_TraceNotFound(t *testing.T) { r.session.On("Query", mock.AnythingOfType("string"), matchEverything()).Return(query) - trace, err := r.reader.GetTrace(model.TraceID{}) + trace, err := r.reader.GetTrace(context.Background(), model.TraceID{}) assert.Nil(t, trace) assert.EqualError(t, err, "trace not found") }) @@ -151,7 +152,7 @@ func TestSpanReaderGetTrace_TraceNotFound(t *testing.T) { func TestSpanReaderFindTracesBadRequest(t *testing.T) { withSpanReader(func(r *spanReaderTest) { - _, err := r.reader.FindTraces(nil) + _, err := r.reader.FindTraces(context.Background(), nil) assert.Error(t, err) }) } @@ -353,7 +354,7 @@ func TestSpanReaderFindTraces(t *testing.T) { queryParams.DurationMax = time.Minute * 3 } - res, err := r.reader.FindTraces(queryParams) + res, err := r.reader.FindTraces(context.Background(), queryParams) if testCase.expectedError == "" { assert.NoError(t, err) assert.Len(t, res, 
testCase.expectedCount, "expecting certain number of traces") diff --git a/plugin/storage/es/factory.go b/plugin/storage/es/factory.go index 68d6d5a31e1a..d3b15d319d26 100644 --- a/plugin/storage/es/factory.go +++ b/plugin/storage/es/factory.go @@ -15,7 +15,11 @@ package es import ( + "bufio" "flag" + "os" + "path/filepath" + "strings" "github.com/spf13/viper" "github.com/uber/jaeger-lib/metrics" @@ -74,16 +78,58 @@ func (f *Factory) Initialize(metricsFactory metrics.Factory, logger *zap.Logger) // CreateSpanReader implements storage.Factory func (f *Factory) CreateSpanReader() (spanstore.Reader, error) { cfg := f.primaryConfig - return esSpanStore.NewSpanReader(f.primaryClient, f.logger, cfg.GetMaxSpanAge(), f.metricsFactory, cfg.GetIndexPrefix()), nil + return esSpanStore.NewSpanReader(esSpanStore.SpanReaderParams{ + Client: f.primaryClient, + Logger: f.logger, + MetricsFactory: f.metricsFactory, + MaxLookback: cfg.GetMaxSpanAge(), + IndexPrefix: cfg.GetIndexPrefix(), + TagDotReplacement: cfg.GetTagDotReplacement(), + }), nil } // CreateSpanWriter implements storage.Factory func (f *Factory) CreateSpanWriter() (spanstore.Writer, error) { cfg := f.primaryConfig - return esSpanStore.NewSpanWriter(f.primaryClient, f.logger, f.metricsFactory, cfg.GetNumShards(), cfg.GetNumReplicas(), cfg.GetIndexPrefix()), nil + var tags []string + if cfg.GetTagsFilePath() != "" { + var err error + if tags, err = loadTagsFromFile(cfg.GetTagsFilePath()); err != nil { + f.logger.Error("Could not open file with tags", zap.Error(err)) + return nil, err + } + } + return esSpanStore.NewSpanWriter(esSpanStore.SpanWriterParams{Client: f.primaryClient, + Logger: f.logger, + MetricsFactory: f.metricsFactory, + NumShards: f.primaryConfig.GetNumShards(), + NumReplicas: f.primaryConfig.GetNumReplicas(), + IndexPrefix: f.primaryConfig.GetIndexPrefix(), + AllTagsAsFields: f.primaryConfig.GetAllTagsAsFields(), + TagKeysAsFields: tags, + TagDotReplacement: f.primaryConfig.GetTagDotReplacement(), + }), nil } // CreateDependencyReader implements storage.Factory func (f *Factory) CreateDependencyReader() (dependencystore.Reader, error) { return esDepStore.NewDependencyStore(f.primaryClient, f.logger, f.primaryConfig.GetIndexPrefix()), nil } + +func loadTagsFromFile(filePath string) ([]string, error) { + file, err := os.Open(filepath.Clean(filePath)) + if err != nil { + return nil, err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + var tags []string + for scanner.Scan() { + line := scanner.Text() + if tag := strings.TrimSpace(line); tag != "" { + tags = append(tags, tag) + } + } + return tags, nil +} diff --git a/plugin/storage/es/factory_test.go b/plugin/storage/es/factory_test.go index 1ba85df5117f..c429cd545a3f 100644 --- a/plugin/storage/es/factory_test.go +++ b/plugin/storage/es/factory_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/uber/jaeger-lib/metrics" "go.uber.org/zap" @@ -66,3 +67,45 @@ func TestElasticsearchFactory(t *testing.T) { _, err = f.CreateDependencyReader() assert.NoError(t, err) } + +func TestElasticsearchTagsFileDoNotExist(t *testing.T) { + f := NewFactory() + mockConf := &mockClientBuilder{} + mockConf.TagsFilePath = "fixtures/tags_foo.txt" + f.primaryConfig = mockConf + assert.NoError(t, f.Initialize(metrics.NullFactory, zap.NewNop())) + r, err := f.CreateSpanWriter() + require.Error(t, err) + assert.Nil(t, r) +} + +func TestLoadTagsFromFile(t *testing.T) { + 
tests := []struct { + path string + tags []string + error bool + }{ + { + path: "fixtures/do_not_exists.txt", + error: true, + }, + { + path: "fixtures/tags_01.txt", + tags: []string{"foo", "bar", "space"}, + }, + { + path: "fixtures/tags_02.txt", + tags: nil, + }, + } + + for _, test := range tests { + tags, err := loadTagsFromFile(test.path) + if test.error { + require.Error(t, err) + assert.Nil(t, tags) + } else { + assert.Equal(t, test.tags, tags) + } + } +} diff --git a/plugin/storage/es/fixtures/tags_01.txt b/plugin/storage/es/fixtures/tags_01.txt new file mode 100644 index 000000000000..ec834979a31c --- /dev/null +++ b/plugin/storage/es/fixtures/tags_01.txt @@ -0,0 +1,3 @@ +foo +bar + space diff --git a/plugin/storage/es/fixtures/tags_02.txt b/plugin/storage/es/fixtures/tags_02.txt new file mode 100644 index 000000000000..fd40910d9e70 --- /dev/null +++ b/plugin/storage/es/fixtures/tags_02.txt @@ -0,0 +1,4 @@ + + + + diff --git a/plugin/storage/es/options.go b/plugin/storage/es/options.go index 8fac6c70ce9a..0e650499e7a9 100644 --- a/plugin/storage/es/options.go +++ b/plugin/storage/es/options.go @@ -37,6 +37,10 @@ const ( suffixBulkActions = ".bulk.actions" suffixBulkFlushInterval = ".bulk.flush-interval" suffixIndexPrefix = ".index-prefix" + suffixTagsAsFields = ".tags-as-fields" + suffixTagsAsFieldsAll = suffixTagsAsFields + ".all" + suffixTagsFile = suffixTagsAsFields + ".config-file" + suffixTagDeDotChar = suffixTagsAsFields + ".dot-replacement" ) // TODO this should be moved next to config.Configuration struct (maybe ./flags package) @@ -75,6 +79,7 @@ func NewOptions(primaryNamespace string, otherNamespaces ...string) *Options { BulkWorkers: 1, BulkActions: 1000, BulkFlushInterval: time.Millisecond * 200, + TagDotReplacement: "@", }, servers: "http://127.0.0.1:9200", namespace: primaryNamespace, @@ -146,6 +151,18 @@ func addFlags(flagSet *flag.FlagSet, nsConfig *namespaceConfig) { nsConfig.namespace+suffixIndexPrefix, nsConfig.IndexPrefix, "Optional prefix of Jaeger indices. For example \"production\" creates \"production:jaeger-*\".") + flagSet.Bool( + nsConfig.namespace+suffixTagsAsFieldsAll, + nsConfig.AllTagsAsFields, + "(experimental) Store all span and process tags as object fields. If true "+suffixTagsFile+" is ignored. Binary tags are always stored as nested objects.") + flagSet.String( + nsConfig.namespace+suffixTagsFile, + nsConfig.TagsFilePath, + "(experimental) Optional path to a file containing tag keys which will be stored as object fields. Each key should be on a separate line.") + flagSet.String( + nsConfig.namespace+suffixTagDeDotChar, + nsConfig.TagDotReplacement, + "(experimental) The character used to replace dots (\".\") in tag keys stored as object fields.") } // InitFromViper initializes Options with properties from viper @@ -169,6 +186,9 @@ func initFromViper(cfg *namespaceConfig, v *viper.Viper) { cfg.BulkActions = v.GetInt(cfg.namespace + suffixBulkActions) cfg.BulkFlushInterval = v.GetDuration(cfg.namespace + suffixBulkFlushInterval) cfg.IndexPrefix = v.GetString(cfg.namespace + suffixIndexPrefix) + cfg.AllTagsAsFields = v.GetBool(cfg.namespace + suffixTagsAsFieldsAll) + cfg.TagsFilePath = v.GetString(cfg.namespace + suffixTagsFile) + cfg.TagDotReplacement = v.GetString(cfg.namespace + suffixTagDeDotChar) } // GetPrimary returns primary configuration. 
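
Aside (not part of the diff): the new `--es.tags-as-fields.*` options above let selected span/process tag keys be stored as object fields, with dots in keys replaced by a configurable character (default `@` per `NewOptions`), and `loadTagsFromFile` reads the key list one entry per line, trimming whitespace and skipping blanks. The standalone Go sketch below only mirrors that behaviour for illustration; the helpers `readTagKeys` and `replaceDots` are hypothetical names, not Jaeger APIs.

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// readTagKeys parses a tags config file in the format this change expects:
// one tag key per line, surrounding whitespace trimmed, blank lines skipped.
// (Scanner errors are ignored here for brevity.)
func readTagKeys(r io.Reader) []string {
	var keys []string
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		if key := strings.TrimSpace(scanner.Text()); key != "" {
			keys = append(keys, key)
		}
	}
	return keys
}

// replaceDots rewrites a tag key for object-field storage,
// e.g. "peer.service" -> "peer@service" with the default "@" replacement.
func replaceDots(key, replacement string) string {
	return strings.Replace(key, ".", replacement, -1)
}

func main() {
	cfg := "peer.service\n\n  error  \n"
	for _, k := range readTagKeys(strings.NewReader(cfg)) {
		fmt.Printf("%s -> %s\n", k, replaceDots(k, "@"))
	}
	// Output:
	// peer.service -> peer@service
	// error -> error
}
```

The same dot replacement has to be applied when querying (see `buildTagQuery`/`buildObjectQuery` later in this diff), so that a search for `peer.service` matches the `tag.peer@service` field written at ingest time.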
diff --git a/plugin/storage/es/spanstore/dbmodel/fixtures/domain_01.json b/plugin/storage/es/spanstore/dbmodel/fixtures/domain_01.json new file mode 100644 index 000000000000..420d4e2bb6e2 --- /dev/null +++ b/plugin/storage/es/spanstore/dbmodel/fixtures/domain_01.json @@ -0,0 +1,89 @@ +{ + "traceId": "AAAAAAAAAAAAAAAAAAAAAQ==", + "spanId": "AAAAAAAAAAI=", + "operationName": "test-general-conversion", + "references": [ + { + "refType": "CHILD_OF", + "traceId": "AAAAAAAAAAAAAAAAAAAAAQ==", + "spanId": "AAAAAAAAAAM=" + }, + { + "refType": "FOLLOWS_FROM", + "traceId": "AAAAAAAAAAAAAAAAAAAAAQ==", + "spanId": "AAAAAAAAAAQ=" + }, + { + "refType": "CHILD_OF", + "traceId": "AAAAAAAAAAAAAAAAAAAA/w==", + "spanId": "AAAAAAAAAP8=" + } + ], + "flags": 1, + "startTime": "2017-01-26T16:46:31.639875-05:00", + "duration": "5000ns", + "tags": [ + { + "key": "peer.service", + "vType": "STRING", + "vStr": "service-y" + }, + { + "key": "peer.ipv4", + "vType": "INT64", + "vInt64": 23456 + }, + { + "key": "error", + "vType": "BOOL", + "vBool": true + }, + { + "key": "temperature", + "vType": "FLOAT64", + "vFloat64": 72.5 + }, + { + "key": "blob", + "vType": "BINARY", + "vBinary": "AAAwOQ==" + } + ], + "logs": [ + { + "timestamp": "2017-01-26T16:46:31.639875-05:00", + "fields": [ + { + "key": "event", + "vType": "INT64", + "vInt64": 123415 + } + ] + }, + { + "timestamp": "2017-01-26T16:46:31.639875-05:00", + "fields": [ + { + "key": "x", + "vType": "STRING", + "vStr": "y" + } + ] + } + ], + "process": { + "serviceName": "service-x", + "tags": [ + { + "key": "peer.ipv4", + "vType": "INT64", + "vInt64": 23456 + }, + { + "key": "error", + "vType": "BOOL", + "vBool": true + } + ] + } +} diff --git a/plugin/storage/es/spanstore/dbmodel/fixtures/es_01.json b/plugin/storage/es/spanstore/dbmodel/fixtures/es_01.json new file mode 100644 index 000000000000..d1cfbabcf419 --- /dev/null +++ b/plugin/storage/es/spanstore/dbmodel/fixtures/es_01.json @@ -0,0 +1,90 @@ +{ + "traceID": "1", + "spanID": "2", + "flags": 1, + "operationName": "test-general-conversion", + "references": [ + { + "refType": "CHILD_OF", + "traceID": "1", + "spanID": "3" + }, + { + "refType": "FOLLOWS_FROM", + "traceID": "1", + "spanID": "4" + }, + { + "refType": "CHILD_OF", + "traceID": "ff", + "spanID": "ff" + } + ], + "startTime": 1485467191639875, + "startTimeMillis": 1485467191639, + "duration": 5, + "tags": [ + { + "key": "peer.service", + "type": "string", + "value": "service-y" + }, + { + "key": "peer.ipv4", + "type": "int64", + "value": "23456" + }, + { + "key": "error", + "type": "bool", + "value": "true" + }, + { + "key": "temperature", + "type": "float64", + "value": "72.5" + }, + { + "key": "blob", + "type": "binary", + "value": "00003039" + } + ], + "logs": [ + { + "timestamp": 1485467191639875, + "fields": [ + { + "key": "event", + "type": "int64", + "value": "123415" + } + ] + }, + { + "timestamp": 1485467191639875, + "fields": [ + { + "key": "x", + "type": "string", + "value": "y" + } + ] + } + ], + "process": { + "serviceName": "service-x", + "tags": [ + { + "key": "peer.ipv4", + "type": "int64", + "value": "23456" + }, + { + "key": "error", + "type": "bool", + "value": "true" + } + ] + } +} diff --git a/plugin/storage/es/spanstore/dbmodel/from_domain.go b/plugin/storage/es/spanstore/dbmodel/from_domain.go new file mode 100644 index 000000000000..3e65b7482d65 --- /dev/null +++ b/plugin/storage/es/spanstore/dbmodel/from_domain.go @@ -0,0 +1,136 @@ +// Copyright (c) 2018 Uber Technologies, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbmodel + +import ( + "strings" + + "github.com/jaegertracing/jaeger/model" +) + +// NewFromDomain creates FromDomain used to convert model span to db span +func NewFromDomain(allTagsAsObject bool, tagKeysAsFields []string, tagDotReplacement string) FromDomain { + tags := map[string]bool{} + for _, k := range tagKeysAsFields { + tags[k] = true + } + return FromDomain{allTagsAsFields: allTagsAsObject, tagKeysAsFields: tags, tagDotReplacement: tagDotReplacement} +} + +// FromDomain is used to convert model span to db span +type FromDomain struct { + allTagsAsFields bool + tagKeysAsFields map[string]bool + tagDotReplacement string +} + +// FromDomainEmbedProcess converts model.Span into json.Span format. +// This format includes a ParentSpanID and an embedded Process. +func (fd FromDomain) FromDomainEmbedProcess(span *model.Span) *Span { + return fd.convertSpanEmbedProcess(span) +} + +func (fd FromDomain) convertSpanInternal(span *model.Span) Span { + tags, tagsMap := fd.convertKeyValuesString(span.Tags) + return Span{ + TraceID: TraceID(span.TraceID.String()), + SpanID: SpanID(span.SpanID.String()), + Flags: uint32(span.Flags), + OperationName: span.OperationName, + StartTime: model.TimeAsEpochMicroseconds(span.StartTime), + StartTimeMillis: model.TimeAsEpochMicroseconds(span.StartTime) / 1000, + Duration: model.DurationAsMicroseconds(span.Duration), + Tags: tags, + Tag: tagsMap, + Logs: fd.convertLogs(span.Logs), + } +} + +func (fd FromDomain) convertSpanEmbedProcess(span *model.Span) *Span { + s := fd.convertSpanInternal(span) + s.Process = fd.convertProcess(span.Process) + s.References = fd.convertReferences(span) + return &s +} + +func (fd FromDomain) convertReferences(span *model.Span) []Reference { + out := make([]Reference, 0, len(span.References)) + for _, ref := range span.References { + out = append(out, Reference{ + RefType: fd.convertRefType(ref.RefType), + TraceID: TraceID(ref.TraceID.String()), + SpanID: SpanID(ref.SpanID.String()), + }) + } + return out +} + +func (fd FromDomain) convertRefType(refType model.SpanRefType) ReferenceType { + if refType == model.FollowsFrom { + return FollowsFrom + } + return ChildOf +} + +func (fd FromDomain) convertKeyValuesString(keyValues model.KeyValues) ([]KeyValue, map[string]interface{}) { + var tagsMap map[string]interface{} + var kvs []KeyValue + for _, kv := range keyValues { + if kv.GetVType() != model.BinaryType && (fd.allTagsAsFields || fd.tagKeysAsFields[kv.Key]) { + if tagsMap == nil { + tagsMap = map[string]interface{}{} + } + tagsMap[strings.Replace(kv.Key, ".", fd.tagDotReplacement, -1)] = kv.Value() + } else { + kvs = append(kvs, KeyValue{ + Key: kv.Key, + Type: ValueType(strings.ToLower(kv.VType.String())), + Value: kv.AsString(), + }) + } + } + if kvs == nil { + kvs = make([]KeyValue, 0) + } + return kvs, tagsMap +} + +func (fd FromDomain) convertLogs(logs []model.Log) []Log { + out := make([]Log, len(logs)) + for i, log := range logs { + var kvs 
[]KeyValue + for _, kv := range log.Fields { + kvs = append(kvs, KeyValue{ + Key: kv.Key, + Type: ValueType(strings.ToLower(kv.VType.String())), + Value: kv.AsString(), + }) + } + out[i] = Log{ + Timestamp: model.TimeAsEpochMicroseconds(log.Timestamp), + Fields: kvs, + } + } + return out +} + +func (fd FromDomain) convertProcess(process *model.Process) Process { + tags, tagsMap := fd.convertKeyValuesString(process.Tags) + return Process{ + ServiceName: process.ServiceName, + Tags: tags, + Tag: tagsMap, + } +} diff --git a/plugin/storage/es/spanstore/dbmodel/from_domain_test.go b/plugin/storage/es/spanstore/dbmodel/from_domain_test.go new file mode 100644 index 000000000000..c7ad08f6012a --- /dev/null +++ b/plugin/storage/es/spanstore/dbmodel/from_domain_test.go @@ -0,0 +1,107 @@ +// Copyright (c) 2018 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbmodel + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "testing" + + "github.com/gogo/protobuf/jsonpb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/jaegertracing/jaeger/model" +) + +const NumberOfFixtures = 1 + +func TestFromDomainEmbedProcess(t *testing.T) { + for i := 1; i <= NumberOfFixtures; i++ { + t.Run(fmt.Sprintf("fixture_%d", i), func(t *testing.T) { + domainStr, jsonStr := loadFixtures(t, i) + + var span model.Span + require.NoError(t, jsonpb.Unmarshal(bytes.NewReader(domainStr), &span)) + converter := NewFromDomain(false, nil, ":") + embeddedSpan := converter.FromDomainEmbedProcess(&span) + + var expectedSpan Span + require.NoError(t, json.Unmarshal(jsonStr, &expectedSpan)) + + testJSONEncoding(t, i, jsonStr, embeddedSpan) + + CompareJSONSpans(t, &expectedSpan, embeddedSpan) + }) + } +} + +// Loads and returns domain model and JSON model fixtures with given number i. 
+func loadFixtures(t *testing.T, i int) ([]byte, []byte) { + in := fmt.Sprintf("fixtures/domain_%02d.json", i) + inStr, err := ioutil.ReadFile(in) + require.NoError(t, err) + out := fmt.Sprintf("fixtures/es_%02d.json", i) + outStr, err := ioutil.ReadFile(out) + require.NoError(t, err) + return inStr, outStr +} + +func testJSONEncoding(t *testing.T, i int, expectedStr []byte, object interface{}) { + buf := &bytes.Buffer{} + enc := json.NewEncoder(buf) + enc.SetIndent("", " ") + + outFile := fmt.Sprintf("fixtures/es_%02d", i) + require.NoError(t, enc.Encode(object)) + + if !assert.Equal(t, string(expectedStr), buf.String()) { + err := ioutil.WriteFile(outFile+"-actual.json", buf.Bytes(), 0644) + assert.NoError(t, err) + } +} + +func TestEmptyTags(t *testing.T) { + tags := make([]model.KeyValue, 0) + span := model.Span{Tags: tags, Process: &model.Process{Tags: tags}} + converter := NewFromDomain(false, nil, ":") + dbSpan := converter.FromDomainEmbedProcess(&span) + assert.Equal(t, 0, len(dbSpan.Tags)) + assert.Equal(t, 0, len(dbSpan.Tag)) +} + +func TestTagMap(t *testing.T) { + tags := []model.KeyValue{ + model.String("foo", "foo"), + model.Bool("a", true), + model.Int64("b.b", 1), + } + span := model.Span{Tags: tags, Process: &model.Process{Tags: tags}} + converter := NewFromDomain(false, []string{"a", "b.b", "b*"}, ":") + dbSpan := converter.FromDomainEmbedProcess(&span) + + assert.Equal(t, 1, len(dbSpan.Tags)) + assert.Equal(t, "foo", dbSpan.Tags[0].Key) + assert.Equal(t, 1, len(dbSpan.Process.Tags)) + assert.Equal(t, "foo", dbSpan.Process.Tags[0].Key) + + tagsMap := map[string]interface{}{} + tagsMap["a"] = true + tagsMap["b:b"] = int64(1) + assert.Equal(t, tagsMap, dbSpan.Tag) + assert.Equal(t, tagsMap, dbSpan.Process.Tag) +} diff --git a/plugin/storage/es/spanstore/dbmodel/json_span_compare_test.go b/plugin/storage/es/spanstore/dbmodel/json_span_compare_test.go new file mode 100644 index 000000000000..779ff980aab7 --- /dev/null +++ b/plugin/storage/es/spanstore/dbmodel/json_span_compare_test.go @@ -0,0 +1,71 @@ +// Copyright (c) 2018 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dbmodel + +import ( + "encoding/json" + "sort" + "testing" + + "github.com/kr/pretty" + "github.com/stretchr/testify/assert" +) + +func CompareJSONSpans(t *testing.T, expected *Span, actual *Span) { + sortJSONSpan(expected) + sortJSONSpan(actual) + + if !assert.EqualValues(t, expected, actual) { + for _, err := range pretty.Diff(expected, actual) { + t.Log(err) + } + out, err := json.Marshal(actual) + assert.NoError(t, err) + t.Logf("Actual trace: %s", string(out)) + } +} + +func sortJSONSpan(span *Span) { + sortJSONTags(span.Tags) + sortJSONLogs(span.Logs) + sortJSONProcess(span.Process) +} + +type JSONTagByKey []KeyValue + +func (t JSONTagByKey) Len() int { return len(t) } +func (t JSONTagByKey) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t JSONTagByKey) Less(i, j int) bool { return t[i].Key < t[j].Key } + +func sortJSONTags(tags []KeyValue) { + sort.Sort(JSONTagByKey(tags)) +} + +type JSONLogByTimestamp []Log + +func (t JSONLogByTimestamp) Len() int { return len(t) } +func (t JSONLogByTimestamp) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t JSONLogByTimestamp) Less(i, j int) bool { return t[i].Timestamp < t[j].Timestamp } + +func sortJSONLogs(logs []Log) { + sort.Sort(JSONLogByTimestamp(logs)) + for i := range logs { + sortJSONTags(logs[i].Fields) + } +} + +func sortJSONProcess(process Process) { + sortJSONTags(process.Tags) +} diff --git a/plugin/storage/es/spanstore/dbmodel/model.go b/plugin/storage/es/spanstore/dbmodel/model.go new file mode 100644 index 000000000000..a3b1edb68a4d --- /dev/null +++ b/plugin/storage/es/spanstore/dbmodel/model.go @@ -0,0 +1,94 @@ +// Copyright (c) 2018 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbmodel + +// ReferenceType is the reference type of one span to another +type ReferenceType string + +// TraceID is the shared trace ID of all spans in the trace. +type TraceID string + +// SpanID is the id of a span +type SpanID string + +// ValueType is the type of a value stored in KeyValue struct. +type ValueType string + +const ( + // ChildOf means a span is the child of another span + ChildOf ReferenceType = "CHILD_OF" + // FollowsFrom means a span follows from another span + FollowsFrom ReferenceType = "FOLLOWS_FROM" + + // StringType indicates a string value stored in KeyValue + StringType ValueType = "string" + // BoolType indicates a Boolean value stored in KeyValue + BoolType ValueType = "bool" + // Int64Type indicates a 64bit signed integer value stored in KeyValue + Int64Type ValueType = "int64" + // Float64Type indicates a 64bit float value stored in KeyValue + Float64Type ValueType = "float64" + // BinaryType indicates an arbitrary byte array stored in KeyValue + BinaryType ValueType = "binary" +) + +// Span is ES database representation of the domain span. 
+type Span struct { + TraceID TraceID `json:"traceID"` + SpanID SpanID `json:"spanID"` + ParentSpanID SpanID `json:"parentSpanID,omitempty"` // deprecated + Flags uint32 `json:"flags,omitempty"` + OperationName string `json:"operationName"` + References []Reference `json:"references"` + StartTime uint64 `json:"startTime"` // microseconds since Unix epoch + // ElasticSearch does not support a UNIX Epoch timestamp in microseconds, + // so Jaeger maps StartTime to a 'long' type. This extra StartTimeMillis field + // works around this issue, enabling timerange queries. + StartTimeMillis uint64 `json:"startTimeMillis"` + Duration uint64 `json:"duration"` // microseconds + Tags []KeyValue `json:"tags"` + // Alternative representation of tags for better kibana support + Tag map[string]interface{} `json:"tag,omitempty"` + Logs []Log `json:"logs"` + Process Process `json:"process,omitempty"` +} + +// Reference is a reference from one span to another +type Reference struct { + RefType ReferenceType `json:"refType"` + TraceID TraceID `json:"traceID"` + SpanID SpanID `json:"spanID"` +} + +// Process is the process emitting a set of spans +type Process struct { + ServiceName string `json:"serviceName"` + Tags []KeyValue `json:"tags"` + // Alternative representation of tags for better kibana support + Tag map[string]interface{} `json:"tag,omitempty"` +} + +// Log is a log emitted in a span +type Log struct { + Timestamp uint64 `json:"timestamp"` + Fields []KeyValue `json:"fields"` +} + +// KeyValue is a a key-value pair with typed value. +type KeyValue struct { + Key string `json:"key"` + Type ValueType `json:"type,omitempty"` + Value interface{} `json:"value"` +} diff --git a/model/converter/json/to_domain.go b/plugin/storage/es/spanstore/dbmodel/to_domain.go similarity index 62% rename from model/converter/json/to_domain.go rename to plugin/storage/es/spanstore/dbmodel/to_domain.go index fc3033446f4b..104e0105a954 100644 --- a/model/converter/json/to_domain.go +++ b/plugin/storage/es/spanstore/dbmodel/to_domain.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2018 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,27 +12,39 @@ // See the License for the specific language governing permissions and // limitations under the License. -package json +package dbmodel import ( "encoding/hex" "fmt" "strconv" - - "github.com/pkg/errors" + "strings" "github.com/jaegertracing/jaeger/model" - "github.com/jaegertracing/jaeger/model/json" ) -// SpanToDomain converts json.Span with embedded Process into model.Span format. 
-func SpanToDomain(span *json.Span) (*model.Span, error) { - return toDomain{}.spanToDomain(span) +// NewToDomain creates ToDomain +func NewToDomain(tagDotReplacement string) ToDomain { + return ToDomain{tagDotReplacement: tagDotReplacement} +} + +// ToDomain is used to convert Span to model.Span +type ToDomain struct { + tagDotReplacement string } -type toDomain struct{} +// ReplaceDot replaces dot with dotReplacement +func (td ToDomain) ReplaceDot(k string) string { + return strings.Replace(k, ".", td.tagDotReplacement, -1) +} + +// ReplaceDotReplacement replaces dotReplacement with dot +func (td ToDomain) ReplaceDotReplacement(k string) string { + return strings.Replace(k, td.tagDotReplacement, ".", -1) +} -func (td toDomain) spanToDomain(dbSpan *json.Span) (*model.Span, error) { +// SpanToDomain converts db span into model Span +func (td ToDomain) SpanToDomain(dbSpan *Span) (*model.Span, error) { tags, err := td.convertKeyValues(dbSpan.Tags) if err != nil { return nil, err @@ -67,6 +79,12 @@ func (td toDomain) spanToDomain(dbSpan *json.Span) (*model.Span, error) { refs = model.MaybeAddParentSpanID(traceID, parentSpanID, refs) } + fieldTags, err := td.convertTagFields(dbSpan.Tag) + if err != nil { + return nil, err + } + tags = append(tags, fieldTags...) + span := &model.Span{ TraceID: traceID, SpanID: model.NewSpanID(uint64(spanIDInt)), @@ -82,14 +100,14 @@ func (td toDomain) spanToDomain(dbSpan *json.Span) (*model.Span, error) { return span, nil } -func (td toDomain) convertRefs(refs []json.Reference) ([]model.SpanRef, error) { +func (td ToDomain) convertRefs(refs []Reference) ([]model.SpanRef, error) { retMe := make([]model.SpanRef, len(refs)) for i, r := range refs { // There are some inconsistencies with ReferenceTypes, hence the hacky fix. var refType model.SpanRefType - if r.RefType == json.ChildOf { + if r.RefType == ChildOf { refType = model.ChildOf - } else if r.RefType == json.FollowsFrom { + } else if r.RefType == FollowsFrom { refType = model.FollowsFrom } else { return nil, fmt.Errorf("not a valid SpanRefType string %s", string(r.RefType)) @@ -114,7 +132,7 @@ func (td toDomain) convertRefs(refs []json.Reference) ([]model.SpanRef, error) { return retMe, nil } -func (td toDomain) convertKeyValues(tags []json.KeyValue) ([]model.KeyValue, error) { +func (td ToDomain) convertKeyValues(tags []KeyValue) ([]model.KeyValue, error) { retMe := make([]model.KeyValue, len(tags)) for i := range tags { kv, err := td.convertKeyValue(&tags[i]) @@ -126,10 +144,45 @@ func (td toDomain) convertKeyValues(tags []json.KeyValue) ([]model.KeyValue, err return retMe, nil } +func (td ToDomain) convertTagFields(tagsMap map[string]interface{}) ([]model.KeyValue, error) { + kvs := make([]model.KeyValue, len(tagsMap)) + i := 0 + for k, v := range tagsMap { + tag, err := td.convertTagField(k, v) + if err != nil { + return nil, err + } + kvs[i] = tag + i++ + } + return kvs, nil +} + +func (td ToDomain) convertTagField(k string, v interface{}) (model.KeyValue, error) { + dKey := td.ReplaceDotReplacement(k) + // The number is always a float64 therefore type assertion on int (v.(int/64/32)) does not work. + // If 1.0, 2.0.. 
was stored as float it will be read as int + if pInt, err := strconv.ParseInt(fmt.Sprintf("%v", v), 10, 64); err == nil { + return model.Int64(k, pInt), nil + } + switch val := v.(type) { + case float64: + return model.Float64(dKey, val), nil + case bool: + return model.Bool(dKey, val), nil + case string: + return model.String(dKey, val), nil + // the binary is never returned, ES returns it as string with base64 encoding + case []byte: + return model.Binary(dKey, val), nil + default: + return model.String("", ""), fmt.Errorf("invalid tag type in %+v", v) + } +} + // convertKeyValue expects the Value field to be string, because it only works // as a reverse transformation after FromDomain() for ElasticSearch model. -// This method would not work on general JSON from the UI which may contain numeric values. -func (td toDomain) convertKeyValue(tag *json.KeyValue) (model.KeyValue, error) { +func (td ToDomain) convertKeyValue(tag *KeyValue) (model.KeyValue, error) { if tag.Value == nil { return model.KeyValue{}, fmt.Errorf("invalid nil Value in %v", tag) } @@ -138,27 +191,27 @@ func (td toDomain) convertKeyValue(tag *json.KeyValue) (model.KeyValue, error) { return model.KeyValue{}, fmt.Errorf("non-string Value of type %t in %v", tag.Value, tag) } switch tag.Type { - case json.StringType: + case StringType: return model.String(tag.Key, tagValue), nil - case json.BoolType: + case BoolType: value, err := strconv.ParseBool(tagValue) if err != nil { return model.KeyValue{}, err } return model.Bool(tag.Key, value), nil - case json.Int64Type: + case Int64Type: value, err := strconv.ParseInt(tagValue, 10, 64) if err != nil { return model.KeyValue{}, err } return model.Int64(tag.Key, value), nil - case json.Float64Type: + case Float64Type: value, err := strconv.ParseFloat(tagValue, 64) if err != nil { return model.KeyValue{}, err } return model.Float64(tag.Key, value), nil - case json.BinaryType: + case BinaryType: value, err := hex.DecodeString(tagValue) if err != nil { return model.KeyValue{}, err @@ -168,7 +221,7 @@ func (td toDomain) convertKeyValue(tag *json.KeyValue) (model.KeyValue, error) { return model.KeyValue{}, fmt.Errorf("not a valid ValueType string %s", string(tag.Type)) } -func (td toDomain) convertLogs(logs []json.Log) ([]model.Log, error) { +func (td ToDomain) convertLogs(logs []Log) ([]model.Log, error) { retMe := make([]model.Log, len(logs)) for i, l := range logs { fields, err := td.convertKeyValues(l.Fields) @@ -183,14 +236,17 @@ func (td toDomain) convertLogs(logs []json.Log) ([]model.Log, error) { return retMe, nil } -func (td toDomain) convertProcess(process *json.Process) (*model.Process, error) { - if process == nil { - return nil, errors.New("Process is nil") - } +func (td ToDomain) convertProcess(process Process) (*model.Process, error) { tags, err := td.convertKeyValues(process.Tags) if err != nil { return nil, err } + fieldTags, err := td.convertTagFields(process.Tag) + if err != nil { + return nil, err + } + tags = append(tags, fieldTags...) + return &model.Process{ Tags: tags, ServiceName: process.ServiceName, diff --git a/model/converter/json/to_domain_test.go b/plugin/storage/es/spanstore/dbmodel/to_domain_test.go similarity index 60% rename from model/converter/json/to_domain_test.go rename to plugin/storage/es/spanstore/dbmodel/to_domain_test.go index ff72aac9caec..7a26a4d7d2de 100644 --- a/model/converter/json/to_domain_test.go +++ b/plugin/storage/es/spanstore/dbmodel/to_domain_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Uber Technologies, Inc. 
+// Copyright (c) 2018 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package json +package dbmodel import ( "bytes" @@ -23,11 +23,11 @@ import ( "testing" gogojsonpb "github.com/gogo/protobuf/jsonpb" + "github.com/kr/pretty" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/jaegertracing/jaeger/model" - jModel "github.com/jaegertracing/jaeger/model/json" ) func TestToDomain(t *testing.T) { @@ -45,10 +45,10 @@ func testToDomain(t *testing.T, testParentSpanID bool) { span.ParentSpanID = "3" } - actualSpan, err := SpanToDomain(&span) + actualSpan, err := NewToDomain(":").SpanToDomain(&span) require.NoError(t, err) - out := fmt.Sprintf("fixtures/domain_es_%02d.json", i) + out := fmt.Sprintf("fixtures/domain_%02d.json", i) outStr, err := ioutil.ReadFile(out) require.NoError(t, err) var expectedSpan model.Span @@ -58,25 +58,25 @@ func testToDomain(t *testing.T, testParentSpanID bool) { } } -func loadESSpanFixture(i int) (jModel.Span, error) { +func loadESSpanFixture(i int) (Span, error) { in := fmt.Sprintf("fixtures/es_%02d.json", i) inStr, err := ioutil.ReadFile(in) if err != nil { - return jModel.Span{}, err + return Span{}, err } - var span jModel.Span + var span Span err = json.Unmarshal(inStr, &span) return span, err } -func failingSpanTransform(t *testing.T, embeddedSpan *jModel.Span, errMsg string) { - domainSpan, err := SpanToDomain(embeddedSpan) +func failingSpanTransform(t *testing.T, embeddedSpan *Span, errMsg string) { + domainSpan, err := NewToDomain(":").SpanToDomain(embeddedSpan) assert.Nil(t, domainSpan) assert.EqualError(t, err, errMsg) } -func failingSpanTransformAnyMsg(t *testing.T, embeddedSpan *jModel.Span) { - domainSpan, err := SpanToDomain(embeddedSpan) +func failingSpanTransformAnyMsg(t *testing.T, embeddedSpan *Span) { + domainSpan, err := NewToDomain(":").SpanToDomain(embeddedSpan) assert.Nil(t, domainSpan) assert.Error(t, err) } @@ -85,7 +85,7 @@ func TestFailureBadTypeTags(t *testing.T) { badTagESSpan, err := loadESSpanFixture(1) require.NoError(t, err) - badTagESSpan.Tags = []jModel.KeyValue{ + badTagESSpan.Tags = []KeyValue{ { Key: "meh", Type: "badType", @@ -99,7 +99,7 @@ func TestFailureBadBoolTags(t *testing.T) { badTagESSpan, err := loadESSpanFixture(1) require.NoError(t, err) - badTagESSpan.Tags = []jModel.KeyValue{ + badTagESSpan.Tags = []KeyValue{ { Key: "meh", Value: "meh", @@ -113,7 +113,7 @@ func TestFailureBadIntTags(t *testing.T) { badTagESSpan, err := loadESSpanFixture(1) require.NoError(t, err) - badTagESSpan.Tags = []jModel.KeyValue{ + badTagESSpan.Tags = []KeyValue{ { Key: "meh", Value: "meh", @@ -127,7 +127,7 @@ func TestFailureBadFloatTags(t *testing.T) { badTagESSpan, err := loadESSpanFixture(1) require.NoError(t, err) - badTagESSpan.Tags = []jModel.KeyValue{ + badTagESSpan.Tags = []KeyValue{ { Key: "meh", Value: "meh", @@ -141,7 +141,7 @@ func TestFailureBadBinaryTags(t *testing.T) { badTagESSpan, err := loadESSpanFixture(1) require.NoError(t, err) - badTagESSpan.Tags = []jModel.KeyValue{ + badTagESSpan.Tags = []KeyValue{ { Key: "zzzz", Value: "zzzz", @@ -154,10 +154,10 @@ func TestFailureBadBinaryTags(t *testing.T) { func TestFailureBadLogs(t *testing.T) { badLogsESSpan, err := loadESSpanFixture(1) require.NoError(t, err) - 
badLogsESSpan.Logs = []jModel.Log{ + badLogsESSpan.Logs = []Log{ { Timestamp: 0, - Fields: []jModel.KeyValue{ + Fields: []KeyValue{ { Key: "sneh", Value: "", @@ -170,13 +170,12 @@ func TestFailureBadLogs(t *testing.T) { } func TestRevertKeyValueOfType(t *testing.T) { - td := toDomain{} tests := []struct { - kv *jModel.KeyValue + kv *KeyValue err string }{ { - kv: &jModel.KeyValue{ + kv: &KeyValue{ Key: "sneh", Type: "badType", Value: "someString", @@ -184,16 +183,17 @@ func TestRevertKeyValueOfType(t *testing.T) { err: "not a valid ValueType string", }, { - kv: &jModel.KeyValue{}, + kv: &KeyValue{}, err: "invalid nil Value", }, { - kv: &jModel.KeyValue{ + kv: &KeyValue{ Value: 123, }, err: "non-string Value of type", }, } + td := ToDomain{} for _, test := range tests { t.Run(test.err, func(t *testing.T) { tag := test.kv @@ -207,7 +207,7 @@ func TestRevertKeyValueOfType(t *testing.T) { func TestFailureBadRefs(t *testing.T) { badRefsESSpan, err := loadESSpanFixture(1) require.NoError(t, err) - badRefsESSpan.References = []jModel.Reference{ + badRefsESSpan.References = []Reference{ { RefType: "makeOurOwnCasino", TraceID: "1", @@ -219,7 +219,7 @@ func TestFailureBadRefs(t *testing.T) { func TestFailureBadTraceIDRefs(t *testing.T) { badRefsESSpan, err := loadESSpanFixture(1) require.NoError(t, err) - badRefsESSpan.References = []jModel.Reference{ + badRefsESSpan.References = []Reference{ { RefType: "CHILD_OF", TraceID: "ZZBADZZ", @@ -232,7 +232,7 @@ func TestFailureBadTraceIDRefs(t *testing.T) { func TestFailureBadSpanIDRefs(t *testing.T) { badRefsESSpan, err := loadESSpanFixture(1) require.NoError(t, err) - badRefsESSpan.References = []jModel.Reference{ + badRefsESSpan.References = []Reference{ { RefType: "CHILD_OF", TraceID: "1", @@ -246,28 +246,20 @@ func TestFailureBadProcess(t *testing.T) { badProcessESSpan, err := loadESSpanFixture(1) require.NoError(t, err) - badTags := []jModel.KeyValue{ + badTags := []KeyValue{ { Key: "meh", Value: "", Type: "badType", }, } - badProcessESSpan.Process = &jModel.Process{ + badProcessESSpan.Process = Process{ ServiceName: "hello", Tags: badTags, } failingSpanTransform(t, &badProcessESSpan, "not a valid ValueType string badType") } -func TestProcessPointer(t *testing.T) { - badProcessESSpan, err := loadESSpanFixture(1) - require.NoError(t, err) - - badProcessESSpan.Process = nil - failingSpanTransform(t, &badProcessESSpan, "Process is nil") -} - func TestFailureBadTraceID(t *testing.T) { badTraceIDESSpan, err := loadESSpanFixture(1) require.NoError(t, err) @@ -288,3 +280,63 @@ func TestFailureBadParentSpanID(t *testing.T) { badParentSpanIDESSpan.ParentSpanID = "zz" failingSpanTransformAnyMsg(t, &badParentSpanIDESSpan) } + +func TestFailureBadSpanFieldTag(t *testing.T) { + badParentSpanIDESSpan, err := loadESSpanFixture(1) + require.NoError(t, err) + badParentSpanIDESSpan.Tag = map[string]interface{}{"foo": struct{}{}} + failingSpanTransformAnyMsg(t, &badParentSpanIDESSpan) +} + +func TestFailureBadProcessFieldTag(t *testing.T) { + badParentSpanIDESSpan, err := loadESSpanFixture(1) + require.NoError(t, err) + badParentSpanIDESSpan.Process.Tag = map[string]interface{}{"foo": struct{}{}} + failingSpanTransformAnyMsg(t, &badParentSpanIDESSpan) +} + +func CompareModelSpans(t *testing.T, expected *model.Span, actual *model.Span) { + model.SortSpan(expected) + model.SortSpan(actual) + + if !assert.EqualValues(t, expected, actual) { + for _, err := range pretty.Diff(expected, actual) { + t.Log(err) + } + out, err := json.Marshal(actual) + assert.NoError(t, err) + 
t.Logf("Actual trace: %s", string(out)) + } +} + +func TestTagsMap(t *testing.T) { + tests := []struct { + fieldTags map[string]interface{} + expected []model.KeyValue + err error + }{ + {fieldTags: map[string]interface{}{"bool:bool": true}, expected: []model.KeyValue{model.Bool("bool.bool", true)}}, + {fieldTags: map[string]interface{}{"int.int": int64(1)}, expected: []model.KeyValue{model.Int64("int.int", 1)}}, + {fieldTags: map[string]interface{}{"float": float64(1.1)}, expected: []model.KeyValue{model.Float64("float", 1.1)}}, + // we are not able to reproduce type for float 123 or any N.0 number therefore returning int + {fieldTags: map[string]interface{}{"float": float64(123)}, expected: []model.KeyValue{model.Int64("float", 123)}}, + {fieldTags: map[string]interface{}{"float": float64(123.0)}, expected: []model.KeyValue{model.Int64("float", 123)}}, + {fieldTags: map[string]interface{}{"str": "foo"}, expected: []model.KeyValue{model.String("str", "foo")}}, + {fieldTags: map[string]interface{}{"binary": []byte("foo")}, expected: []model.KeyValue{model.Binary("binary", []byte("foo"))}}, + {fieldTags: map[string]interface{}{"unsupported": struct{}{}}, err: fmt.Errorf("invalid tag type in %+v", struct{}{})}, + } + converter := NewToDomain(":") + for i, test := range tests { + t.Run(fmt.Sprintf("%d, %s", i, test.fieldTags), func(t *testing.T) { + tags, err := converter.convertTagFields(test.fieldTags) + assert.Equal(t, test.expected, tags) + assert.Equal(t, test.err, err) + }) + } +} + +func TestDotReplacement(t *testing.T) { + converter := NewToDomain("#") + k := "foo.foo" + assert.Equal(t, k, converter.ReplaceDotReplacement(converter.ReplaceDot(k))) +} diff --git a/plugin/storage/es/spanstore/fixtures/query_01.json b/plugin/storage/es/spanstore/fixtures/query_01.json new file mode 100644 index 000000000000..e38c1e84ac49 --- /dev/null +++ b/plugin/storage/es/spanstore/fixtures/query_01.json @@ -0,0 +1,103 @@ +{ + "bool":{ + "should":[ + { + "bool":{ + "must":{ + "match":{ + "tag.bat@foo":{ + "query":"spook" + } + } + } + } + }, + { + "bool":{ + "must":{ + "match":{ + "process.tag.bat@foo":{ + "query":"spook" + } + } + } + } + }, + { + "nested":{ + "path":"tags", + "query":{ + "bool":{ + "must":[ + { + "match":{ + "tags.key":{ + "query":"bat.foo" + } + } + }, + { + "match":{ + "tags.value":{ + "query":"spook" + } + } + } + ] + } + } + } + }, + { + "nested":{ + "path":"process.tags", + "query":{ + "bool":{ + "must":[ + { + "match":{ + "process.tags.key":{ + "query":"bat.foo" + } + } + }, + { + "match":{ + "process.tags.value":{ + "query":"spook" + } + } + } + ] + } + } + } + }, + { + "nested":{ + "path":"logs.fields", + "query":{ + "bool":{ + "must":[ + { + "match":{ + "logs.fields.key":{ + "query":"bat.foo" + } + } + }, + { + "match":{ + "logs.fields.value":{ + "query":"spook" + } + } + } + ] + } + } + } + } + ] + } +} diff --git a/plugin/storage/es/spanstore/reader.go b/plugin/storage/es/spanstore/reader.go index 752d09a1b37a..eeea231367e3 100644 --- a/plugin/storage/es/spanstore/reader.go +++ b/plugin/storage/es/spanstore/reader.go @@ -26,11 +26,9 @@ import ( "gopkg.in/olivere/elastic.v5" "github.com/jaegertracing/jaeger/model" - jConverter "github.com/jaegertracing/jaeger/model/converter/json" - jModel "github.com/jaegertracing/jaeger/model/json" "github.com/jaegertracing/jaeger/pkg/es" + "github.com/jaegertracing/jaeger/plugin/storage/es/spanstore/dbmodel" "github.com/jaegertracing/jaeger/storage/spanstore" - storageMetrics 
"github.com/jaegertracing/jaeger/storage/spanstore/metrics" ) const ( @@ -38,16 +36,18 @@ const ( serviceIndex = "jaeger-service-" traceIDAggregation = "traceIDs" - traceIDField = "traceID" - durationField = "duration" - startTimeField = "startTime" - serviceNameField = "process.serviceName" - operationNameField = "operationName" - tagsField = "tags" - processTagsField = "process.tags" - logFieldsField = "logs.fields" - tagKeyField = "key" - tagValueField = "value" + traceIDField = "traceID" + durationField = "duration" + startTimeField = "startTime" + serviceNameField = "process.serviceName" + operationNameField = "operationName" + objectTagsField = "tag" + objectProcessTagsField = "process.tag" + nestedTagsField = "tags" + nestedProcessTagsField = "process.tags" + nestedLogFieldsField = "logs.fields" + tagKeyField = "key" + tagValueField = "value" defaultDocCount = 10000 // the default elasticsearch allowed limit defaultNumTraces = 100 @@ -76,7 +76,9 @@ var ( defaultMaxDuration = model.DurationAsMicroseconds(time.Hour * 24) - tagFieldList = []string{tagsField, processTagsField, logFieldsField} + objectTagFieldList = []string{objectTagsField, objectProcessTagsField} + + nestedTagFieldList = []string{nestedTagsField, nestedProcessTagsField, nestedLogFieldsField} ) // SpanReader can query for and load traces from ElasticSearch @@ -90,31 +92,44 @@ type SpanReader struct { serviceOperationStorage *ServiceOperationStorage spanIndexPrefix string serviceIndexPrefix string + spanConverter dbmodel.ToDomain +} + +// SpanReaderParams holds constructor params for NewSpanReader +type SpanReaderParams struct { + Client es.Client + Logger *zap.Logger + MaxLookback time.Duration + MetricsFactory metrics.Factory + serviceOperationStorage *ServiceOperationStorage + IndexPrefix string + TagDotReplacement string } // NewSpanReader returns a new SpanReader with a metrics. 
-func NewSpanReader(client es.Client, logger *zap.Logger, maxLookback time.Duration, metricsFactory metrics.Factory, indexPrefix string) spanstore.Reader { - return storageMetrics.NewReadMetricsDecorator(newSpanReader(client, logger, maxLookback, indexPrefix), metricsFactory) +func NewSpanReader(p SpanReaderParams) spanstore.Reader { + return newSpanReader(p) } -func newSpanReader(client es.Client, logger *zap.Logger, maxLookback time.Duration, indexPrefix string) *SpanReader { +func newSpanReader(p SpanReaderParams) *SpanReader { ctx := context.Background() - if indexPrefix != "" { - indexPrefix += ":" + if p.IndexPrefix != "" { + p.IndexPrefix += ":" } return &SpanReader{ ctx: ctx, - client: client, - logger: logger, - maxLookback: maxLookback, - serviceOperationStorage: NewServiceOperationStorage(ctx, client, metrics.NullFactory, logger, 0), // the decorator takes care of metrics - spanIndexPrefix: indexPrefix + spanIndex, - serviceIndexPrefix: indexPrefix + serviceIndex, + client: p.Client, + logger: p.Logger, + maxLookback: p.MaxLookback, + serviceOperationStorage: NewServiceOperationStorage(ctx, p.Client, metrics.NullFactory, p.Logger, 0), // the decorator takes care of metrics + spanIndexPrefix: p.IndexPrefix + spanIndex, + serviceIndexPrefix: p.IndexPrefix + serviceIndex, + spanConverter: dbmodel.NewToDomain(p.TagDotReplacement), } } // GetTrace takes a traceID and returns a Trace associated with that traceID -func (s *SpanReader) GetTrace(traceID model.TraceID) (*model.Trace, error) { +func (s *SpanReader) GetTrace(ctx context.Context, traceID model.TraceID) (*model.Trace, error) { currentTime := time.Now() traces, err := s.multiRead([]string{traceID.String()}, currentTime.Add(-s.maxLookback), currentTime) if err != nil { @@ -134,7 +149,7 @@ func (s *SpanReader) collectSpans(esSpansRaw []*elastic.SearchHit) ([]*model.Spa if err != nil { return nil, errors.Wrap(err, "Marshalling JSON to span object failed") } - span, err := jConverter.SpanToDomain(jsonSpan) + span, err := s.spanConverter.SpanToDomain(jsonSpan) if err != nil { return nil, errors.Wrap(err, "Converting JSONSpan to domain Span failed") } @@ -143,10 +158,10 @@ func (s *SpanReader) collectSpans(esSpansRaw []*elastic.SearchHit) ([]*model.Spa return spans, nil } -func (s *SpanReader) unmarshalJSONSpan(esSpanRaw *elastic.SearchHit) (*jModel.Span, error) { +func (s *SpanReader) unmarshalJSONSpan(esSpanRaw *elastic.SearchHit) (*dbmodel.Span, error) { esSpanInByteArray := esSpanRaw.Source - var jsonSpan jModel.Span + var jsonSpan dbmodel.Span if err := json.Unmarshal(*esSpanInByteArray, &jsonSpan); err != nil { return nil, err } @@ -167,14 +182,14 @@ func (s *SpanReader) indicesForTimeRange(indexName string, startTime time.Time, } // GetServices returns all services traced by Jaeger, ordered by frequency -func (s *SpanReader) GetServices() ([]string, error) { +func (s *SpanReader) GetServices(ctx context.Context) ([]string, error) { currentTime := time.Now() jaegerIndices := s.indicesForTimeRange(s.serviceIndexPrefix, currentTime.Add(-s.maxLookback), currentTime) return s.serviceOperationStorage.getServices(jaegerIndices) } // GetOperations returns all operations for a specific service traced by Jaeger -func (s *SpanReader) GetOperations(service string) ([]string, error) { +func (s *SpanReader) GetOperations(ctx context.Context, service string) ([]string, error) { currentTime := time.Now() jaegerIndices := s.indicesForTimeRange(s.serviceIndexPrefix, currentTime.Add(-s.maxLookback), currentTime) return 
s.serviceOperationStorage.getOperations(jaegerIndices, service) @@ -193,7 +208,7 @@ func bucketToStringArray(buckets []*elastic.AggregationBucketKeyItem) ([]string, } // FindTraces retrieves traces that match the traceQuery -func (s *SpanReader) FindTraces(traceQuery *spanstore.TraceQueryParameters) ([]*model.Trace, error) { +func (s *SpanReader) FindTraces(ctx context.Context, traceQuery *spanstore.TraceQueryParameters) ([]*model.Trace, error) { if err := validateQuery(traceQuery); err != nil { return nil, err } @@ -225,7 +240,7 @@ func (s *SpanReader) multiRead(traceIDs []string, startTime, endTime time.Time) totalDocumentsFetched := make(map[string]int) tracesMap := make(map[string]*model.Trace) for { - if traceIDs == nil || len(traceIDs) == 0 { + if len(traceIDs) == 0 { break } @@ -260,10 +275,7 @@ func (s *SpanReader) multiRead(traceIDs []string, startTime, endTime time.Time) lastSpanTraceID := lastSpan.TraceID.String() if traceSpan, ok := tracesMap[lastSpanTraceID]; ok { - for _, span := range spans { - traceSpan.Spans = append(traceSpan.Spans, span) - } - + traceSpan.Spans = append(traceSpan.Spans, spans...) } else { tracesMap[lastSpanTraceID] = &model.Trace{Spans: spans} } @@ -339,7 +351,15 @@ func (s *SpanReader) findTraceIDs(traceQuery *spanstore.TraceQueryParameters) ([ // { "match" : {"tags.key" : "tag3"} }, // { "match" : {"tags.value" : "xyz"} } // ] - // }}}} + // }}}}, + // { "bool":{ + // "must": { + // "match":{ "tags.bat":{ "query":"spook" }} + // }}}, + // { "bool":{ + // "must": { + // "match":{ "tag.bat":{ "query":"spook" }} + // }}} // ] // } // ] @@ -444,10 +464,17 @@ func (s *SpanReader) buildOperationNameQuery(operationName string) elastic.Query } func (s *SpanReader) buildTagQuery(k string, v string) elastic.Query { - queries := make([]elastic.Query, len(tagFieldList)) - for i := range queries { - queries[i] = s.buildNestedQuery(tagFieldList[i], k, v) + objectTagListLen := len(objectTagFieldList) + queries := make([]elastic.Query, len(nestedTagFieldList)+objectTagListLen) + kd := s.spanConverter.ReplaceDot(k) + for i := range objectTagFieldList { + queries[i] = s.buildObjectQuery(objectTagFieldList[i], kd, v) + } + for i := range nestedTagFieldList { + queries[i+objectTagListLen] = s.buildNestedQuery(nestedTagFieldList[i], k, v) } + + // but configuration can change over time return elastic.NewBoolQuery().Should(queries...) } @@ -459,3 +486,9 @@ func (s *SpanReader) buildNestedQuery(field string, k string, v string) elastic. 
tagBoolQuery := elastic.NewBoolQuery().Must(keyQuery, valueQuery) return elastic.NewNestedQuery(field, tagBoolQuery) } + +func (s *SpanReader) buildObjectQuery(field string, k string, v string) elastic.Query { + keyField := fmt.Sprintf("%s.%s", field, k) + keyQuery := elastic.NewMatchQuery(keyField, v) + return elastic.NewBoolQuery().Must(keyQuery) +} diff --git a/plugin/storage/es/spanstore/reader_test.go b/plugin/storage/es/spanstore/reader_test.go index e1ace550b196..a48a02d1f054 100644 --- a/plugin/storage/es/spanstore/reader_test.go +++ b/plugin/storage/es/spanstore/reader_test.go @@ -15,8 +15,10 @@ package spanstore import ( + "context" "encoding/json" "errors" + "io/ioutil" "testing" "time" @@ -28,9 +30,9 @@ import ( "gopkg.in/olivere/elastic.v5" "github.com/jaegertracing/jaeger/model" - esJson "github.com/jaegertracing/jaeger/model/json" "github.com/jaegertracing/jaeger/pkg/es/mocks" "github.com/jaegertracing/jaeger/pkg/testutils" + "github.com/jaegertracing/jaeger/plugin/storage/es/spanstore/dbmodel" "github.com/jaegertracing/jaeger/storage/spanstore" ) @@ -89,7 +91,13 @@ func withSpanReader(fn func(r *spanReaderTest)) { client: client, logger: logger, logBuffer: logBuffer, - reader: newSpanReader(client, logger, 72*time.Hour, ""), + reader: newSpanReader(SpanReaderParams{ + Client: client, + Logger: zap.NewNop(), + MaxLookback: 0, + IndexPrefix: "", + TagDotReplacement: "@", + }), } fn(r) } @@ -98,7 +106,12 @@ var _ spanstore.Reader = &SpanReader{} // check API conformance func TestNewSpanReader(t *testing.T) { client := &mocks.Client{} - reader := NewSpanReader(client, zap.NewNop(), 0, metrics.NullFactory, "") + reader := NewSpanReader(SpanReaderParams{ + Client: client, + Logger: zap.NewNop(), + MaxLookback: 0, + MetricsFactory: metrics.NullFactory, + IndexPrefix: ""}) assert.NotNil(t, reader) } @@ -113,7 +126,11 @@ func TestNewSpanReaderIndexPrefix(t *testing.T) { } for _, testCase := range testCases { client := &mocks.Client{} - r := newSpanReader(client, zap.NewNop(), 0, testCase.prefix) + r := newSpanReader(SpanReaderParams{ + Client: client, + Logger: zap.NewNop(), + MaxLookback: 0, + IndexPrefix: testCase.prefix}) assert.Equal(t, testCase.expected+spanIndex, r.spanIndexPrefix) assert.Equal(t, testCase.expected+serviceIndex, r.serviceIndexPrefix) } @@ -135,7 +152,7 @@ func TestSpanReader_GetTrace(t *testing.T) { }, }, nil) - trace, err := r.reader.GetTrace(model.NewTraceID(0, 1)) + trace, err := r.reader.GetTrace(context.Background(), model.NewTraceID(0, 1)) require.NoError(t, err) require.NotNil(t, trace) @@ -166,7 +183,7 @@ func TestSpanReader_SearchAfter(t *testing.T) { }, }, nil).Times(2) - trace, err := r.reader.GetTrace(model.NewTraceID(0, 1)) + trace, err := r.reader.GetTrace(context.Background(), model.NewTraceID(0, 1)) require.NoError(t, err) require.NotNil(t, trace) @@ -185,7 +202,7 @@ func TestSpanReader_GetTraceQueryError(t *testing.T) { Return(&elastic.MultiSearchResult{ Responses: []*elastic.SearchResult{}, }, nil) - trace, err := r.reader.GetTrace(model.NewTraceID(0, 1)) + trace, err := r.reader.GetTrace(context.Background(), model.NewTraceID(0, 1)) require.EqualError(t, err, "No trace with that ID found") require.Nil(t, trace) }) @@ -204,7 +221,7 @@ func TestSpanReader_GetTraceNilHits(t *testing.T) { }, }, nil) - trace, err := r.reader.GetTrace(model.NewTraceID(0, 1)) + trace, err := r.reader.GetTrace(context.Background(), model.NewTraceID(0, 1)) require.EqualError(t, err, "No trace with that ID found") 
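Note on the `buildTagQuery` change above: the terse `// but configuration can change over time` comment refers to the fact that spans written before and after a configuration switch may store tags differently, either as nested key/value documents or as de-dotted object fields, so a single tag filter has to accept both forms. A rough sketch of the resulting query shape, built with the same `gopkg.in/olivere/elastic.v5` calls the reader uses; the helper and the field names here are illustrative, not the production code:

```
package example

import (
	"strings"

	"gopkg.in/olivere/elastic.v5"
)

// exampleTagQuery mirrors the idea behind buildTagQuery: match the tag either
// as an object field ("tag.<de-dotted key>") or as a nested key/value pair
// under "tags". Process and log tags follow the same pattern.
func exampleTagQuery(key, value, dotReplacement string) elastic.Query {
	// Object form: the key is part of the field name, with "." replaced so
	// Elasticsearch does not interpret it as a sub-object path.
	deDotted := strings.Replace(key, ".", dotReplacement, -1)
	objectQuery := elastic.NewBoolQuery().Must(
		elastic.NewMatchQuery("tag."+deDotted, value),
	)

	// Nested form: key and value live in separate fields of a nested document.
	nestedQuery := elastic.NewNestedQuery("tags", elastic.NewBoolQuery().Must(
		elastic.NewMatchQuery("tags.key", key),
		elastic.NewMatchQuery("tags.value", value),
	))

	// A span matches if either representation matches.
	return elastic.NewBoolQuery().Should(objectQuery, nestedQuery)
}
```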
require.Nil(t, trace) }) @@ -227,7 +244,7 @@ func TestSpanReader_GetTraceInvalidSpanError(t *testing.T) { }, }, nil) - trace, err := r.reader.GetTrace(model.NewTraceID(0, 1)) + trace, err := r.reader.GetTrace(context.Background(), model.NewTraceID(0, 1)) require.Error(t, err, "invalid span") require.Nil(t, trace) }) @@ -251,7 +268,7 @@ func TestSpanReader_GetTraceSpanConversionError(t *testing.T) { }, }, nil) - trace, err := r.reader.GetTrace(model.NewTraceID(0, 1)) + trace, err := r.reader.GetTrace(context.Background(), model.NewTraceID(0, 1)) require.Error(t, err, "span conversion error, because lacks elements") require.Nil(t, trace) }) @@ -268,7 +285,7 @@ func TestSpanReader_esJSONtoJSONSpanModel(t *testing.T) { span, err := r.reader.unmarshalJSONSpan(esSpanRaw) require.NoError(t, err) - var expectedSpan esJson.Span + var expectedSpan dbmodel.Span require.NoError(t, json.Unmarshal(exampleESSpan, &expectedSpan)) assert.EqualValues(t, &expectedSpan, span) }) @@ -392,9 +409,9 @@ func testGet(typ string, t *testing.T) { func returnSearchFunc(typ string, r *spanReaderTest) ([]string, error) { if typ == servicesAggregation { - return r.reader.GetServices() + return r.reader.GetServices(context.Background()) } else if typ == operationsAggregation { - return r.reader.GetOperations("someService") + return r.reader.GetOperations(context.Background(), "someService") } else if typ == traceIDAggregation { return r.reader.findTraceIDs(&spanstore.TraceQueryParameters{}) } @@ -460,7 +477,7 @@ func TestSpanReader_FindTraces(t *testing.T) { NumTraces: 1, } - traces, err := r.reader.FindTraces(traceQuery) + traces, err := r.reader.FindTraces(context.Background(), traceQuery) require.NoError(t, err) assert.Len(t, traces, 1) @@ -504,7 +521,7 @@ func TestSpanReader_FindTracesInvalidQuery(t *testing.T) { StartTimeMax: time.Now(), } - traces, err := r.reader.FindTraces(traceQuery) + traces, err := r.reader.FindTraces(context.Background(), traceQuery) require.Error(t, err) assert.Nil(t, traces) }) @@ -536,7 +553,7 @@ func TestSpanReader_FindTracesAggregationFailure(t *testing.T) { StartTimeMax: time.Now(), } - traces, err := r.reader.FindTraces(traceQuery) + traces, err := r.reader.FindTraces(context.Background(), traceQuery) require.Error(t, err) assert.Nil(t, traces) }) @@ -570,7 +587,7 @@ func TestSpanReader_FindTracesNoTraceIDs(t *testing.T) { StartTimeMax: time.Now(), } - traces, err := r.reader.FindTraces(traceQuery) + traces, err := r.reader.FindTraces(context.Background(), traceQuery) require.NoError(t, err) assert.Len(t, traces, 0) }) @@ -603,7 +620,7 @@ func TestSpanReader_FindTracesReadTraceFailure(t *testing.T) { StartTimeMax: time.Now(), } - traces, err := r.reader.FindTraces(traceQuery) + traces, err := r.reader.FindTraces(context.Background(), traceQuery) require.EqualError(t, err, "read error") assert.Len(t, traces, 0) }) @@ -641,7 +658,7 @@ func TestSpanReader_FindTracesSpanCollectionFailure(t *testing.T) { StartTimeMax: time.Now(), } - traces, err := r.reader.FindTraces(traceQuery) + traces, err := r.reader.FindTraces(context.Background(), traceQuery) require.Error(t, err) assert.Len(t, traces, 0) }) @@ -849,45 +866,15 @@ func TestSpanReader_buildOperationNameQuery(t *testing.T) { } func TestSpanReader_buildTagQuery(t *testing.T) { - expectedStr := - `{ "bool": { - "should": [ - { "nested" : { - "path" : "tags", - "query" : { - "bool" : { - "must" : [ - { "match" : {"tags.key" : {"query":"bat"}} }, - { "match" : {"tags.value" : {"query":"spook"}} } - ] - }}}}, - { "nested" : { - "path" : 
"process.tags", - "query" : { - "bool" : { - "must" : [ - { "match" : {"process.tags.key" : {"query":"bat"}} }, - { "match" : {"process.tags.value" : {"query":"spook"}} } - ] - }}}}, - { "nested" : { - "path" : "logs.fields", - "query" : { - "bool" : { - "must" : [ - { "match" : {"logs.fields.key" : {"query":"bat"}} }, - { "match" : {"logs.fields.value" : {"query":"spook"}} } - ] - }}}} - ] - }}` + inStr, err := ioutil.ReadFile("fixtures/query_01.json") + require.NoError(t, err) withSpanReader(func(r *spanReaderTest) { - tagQuery := r.reader.buildTagQuery("bat", "spook") + tagQuery := r.reader.buildTagQuery("bat.foo", "spook") actual, err := tagQuery.Source() require.NoError(t, err) expected := make(map[string]interface{}) - json.Unmarshal([]byte(expectedStr), &expected) + json.Unmarshal(inStr, &expected) assert.EqualValues(t, expected, actual) }) @@ -912,7 +899,7 @@ func TestSpanReader_GetEmptyIndex(t *testing.T) { NumTraces: 2, } - services, err := r.reader.FindTraces(traceQuery) + services, err := r.reader.FindTraces(context.Background(), traceQuery) require.NoError(t, err) assert.Empty(t, services) }) diff --git a/plugin/storage/es/spanstore/schema.go b/plugin/storage/es/spanstore/schema.go index 40b450ade483..e59a54dbc6b1 100644 --- a/plugin/storage/es/spanstore/schema.go +++ b/plugin/storage/es/spanstore/schema.go @@ -14,7 +14,9 @@ package spanstore -import "fmt" +import ( + "fmt" +) // TODO: resolve traceID concerns (may not require any changes here) const mapping = `{ @@ -29,7 +31,26 @@ const mapping = `{ "_default_":{ "_all":{ "enabled":false - } + }, + "dynamic_templates": [ + { + "span_tags_map": { + "mapping": { + "type": "keyword", + "ignore_above": 256 + }, + "path_match": "tag.*" + } + }, + { + "process_tags_map": { + "mapping": { + "type": "keyword", + "ignore_above": 256 + }, + "path_match": "process.tag.*" + } + }] }, "%s":%s } @@ -101,6 +122,9 @@ var ( "type": "keyword", "ignore_above": 256 }, + "tag": { + "type": "object" + }, "tags": { "type": "nested", "dynamic": false, @@ -139,6 +163,9 @@ var ( } } }, + "tag": { + "type": "object" + }, "tags": { "type": "nested", "dynamic": false, diff --git a/plugin/storage/es/spanstore/service_operation.go b/plugin/storage/es/spanstore/service_operation.go index ee6aced09929..db7e9f490187 100644 --- a/plugin/storage/es/spanstore/service_operation.go +++ b/plugin/storage/es/spanstore/service_operation.go @@ -23,9 +23,9 @@ import ( "go.uber.org/zap" "gopkg.in/olivere/elastic.v5" - jModel "github.com/jaegertracing/jaeger/model/json" "github.com/jaegertracing/jaeger/pkg/cache" "github.com/jaegertracing/jaeger/pkg/es" + "github.com/jaegertracing/jaeger/plugin/storage/es/spanstore/dbmodel" storageMetrics "github.com/jaegertracing/jaeger/storage/spanstore/metrics" ) @@ -67,7 +67,7 @@ func NewServiceOperationStorage( } // Write saves a service to operation pair. 
-func (s *ServiceOperationStorage) Write(indexName string, jsonSpan *jModel.Span) { +func (s *ServiceOperationStorage) Write(indexName string, jsonSpan *dbmodel.Span) { // Insert serviceName:operationName document service := Service{ ServiceName: jsonSpan.Process.ServiceName, diff --git a/plugin/storage/es/spanstore/service_operation_test.go b/plugin/storage/es/spanstore/service_operation_test.go index e3517738b471..de6485c62767 100644 --- a/plugin/storage/es/spanstore/service_operation_test.go +++ b/plugin/storage/es/spanstore/service_operation_test.go @@ -15,6 +15,7 @@ package spanstore import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -22,8 +23,8 @@ import ( "github.com/stretchr/testify/require" "gopkg.in/olivere/elastic.v5" - jModel "github.com/jaegertracing/jaeger/model/json" "github.com/jaegertracing/jaeger/pkg/es/mocks" + "github.com/jaegertracing/jaeger/plugin/storage/es/spanstore/dbmodel" ) func TestWriteService(t *testing.T) { @@ -41,11 +42,11 @@ func TestWriteService(t *testing.T) { w.client.On("Index").Return(indexService) - jsonSpan := &jModel.Span{ - TraceID: jModel.TraceID("1"), - SpanID: jModel.SpanID("0"), + jsonSpan := &dbmodel.Span{ + TraceID: dbmodel.TraceID("1"), + SpanID: dbmodel.SpanID("0"), OperationName: "operation", - Process: &jModel.Process{ + Process: dbmodel.Process{ ServiceName: "service", }, } @@ -76,11 +77,11 @@ func TestWriteServiceError(t *testing.T) { w.client.On("Index").Return(indexService) - jsonSpan := &jModel.Span{ - TraceID: jModel.TraceID("1"), - SpanID: jModel.SpanID("0"), + jsonSpan := &dbmodel.Span{ + TraceID: dbmodel.TraceID("1"), + SpanID: dbmodel.SpanID("0"), OperationName: "operation", - Process: &jModel.Process{ + Process: dbmodel.Process{ ServiceName: "service", }, } @@ -105,7 +106,7 @@ func TestSpanReader_GetServicesEmptyIndex(t *testing.T) { Return(&elastic.MultiSearchResult{ Responses: []*elastic.SearchResult{}, }, nil) - services, err := r.reader.GetServices() + services, err := r.reader.GetServices(context.Background()) require.NoError(t, err) assert.Empty(t, services) }) @@ -119,7 +120,7 @@ func TestSpanReader_GetOperationsEmptyIndex(t *testing.T) { Return(&elastic.MultiSearchResult{ Responses: []*elastic.SearchResult{}, }, nil) - services, err := r.reader.GetOperations("foo") + services, err := r.reader.GetOperations(context.Background(), "foo") require.NoError(t, err) assert.Empty(t, services) }) diff --git a/plugin/storage/es/spanstore/writer.go b/plugin/storage/es/spanstore/writer.go index 799a076d7dcc..e89395b11e74 100644 --- a/plugin/storage/es/spanstore/writer.go +++ b/plugin/storage/es/spanstore/writer.go @@ -28,10 +28,9 @@ import ( "gopkg.in/olivere/elastic.v5" "github.com/jaegertracing/jaeger/model" - "github.com/jaegertracing/jaeger/model/converter/json" - jModel "github.com/jaegertracing/jaeger/model/json" "github.com/jaegertracing/jaeger/pkg/cache" "github.com/jaegertracing/jaeger/pkg/es" + "github.com/jaegertracing/jaeger/plugin/storage/es/spanstore/dbmodel" storageMetrics "github.com/jaegertracing/jaeger/storage/spanstore/metrics" ) @@ -46,7 +45,7 @@ type spanWriterMetrics struct { indexCreate *storageMetrics.WriteMetrics } -type serviceWriter func(string, *jModel.Span) +type serviceWriter func(string, *dbmodel.Span) // SpanWriter is a wrapper around elastic.Client type SpanWriter struct { @@ -60,6 +59,7 @@ type SpanWriter struct { numReplicas int64 spanIndexPrefix string serviceIndexPrefix string + 
spanConverter dbmodel.FromDomain } // Service is the JSON struct for service:operation documents in ElasticSearch @@ -68,15 +68,6 @@ type Service struct { OperationName string `json:"operationName"` } -// Span adds a StartTimeMillis field to the standard JSON span. -// ElasticSearch does not support a UNIX Epoch timestamp in microseconds, -// so Jaeger maps StartTime to a 'long' type. This extra StartTimeMillis field -// works around this issue, enabling timerange queries. -type Span struct { - *jModel.Span - StartTimeMillis uint64 `json:"startTimeMillis"` -} - func (s Service) hashCode() string { h := fnv.New64a() h.Write([]byte(s.ServiceName)) @@ -84,31 +75,37 @@ func (s Service) hashCode() string { return fmt.Sprintf("%x", h.Sum64()) } +// SpanWriterParams holds constructor parameters for NewSpanWriter +type SpanWriterParams struct { + Client es.Client + Logger *zap.Logger + MetricsFactory metrics.Factory + NumShards int64 + NumReplicas int64 + IndexPrefix string + AllTagsAsFields bool + TagKeysAsFields []string + TagDotReplacement string +} + // NewSpanWriter creates a new SpanWriter for use -func NewSpanWriter( - client es.Client, - logger *zap.Logger, - metricsFactory metrics.Factory, - numShards int64, - numReplicas int64, - indexPrefix string, -) *SpanWriter { +func NewSpanWriter(p SpanWriterParams) *SpanWriter { ctx := context.Background() - if numShards == 0 { - numShards = defaultNumShards + if p.NumShards == 0 { + p.NumShards = defaultNumShards } // TODO: Configurable TTL - serviceOperationStorage := NewServiceOperationStorage(ctx, client, metricsFactory, logger, time.Hour*12) - if indexPrefix != "" { - indexPrefix += ":" + serviceOperationStorage := NewServiceOperationStorage(ctx, p.Client, p.MetricsFactory, p.Logger, time.Hour*12) + if p.IndexPrefix != "" { + p.IndexPrefix += ":" } return &SpanWriter{ ctx: ctx, - client: client, - logger: logger, + client: p.Client, + logger: p.Logger, writerMetrics: spanWriterMetrics{ - indexCreate: storageMetrics.NewWriteMetrics(metricsFactory, "index_create"), + indexCreate: storageMetrics.NewWriteMetrics(p.MetricsFactory, "index_create"), }, serviceWriter: serviceOperationStorage.Write, indexCache: cache.NewLRUWithOptions( @@ -117,10 +114,11 @@ func NewSpanWriter( TTL: 48 * time.Hour, }, ), - numShards: numShards, - numReplicas: numReplicas, - spanIndexPrefix: indexPrefix + spanIndex, - serviceIndexPrefix: indexPrefix + serviceIndex, + numShards: p.NumShards, + numReplicas: p.NumReplicas, + spanIndexPrefix: p.IndexPrefix + spanIndex, + serviceIndexPrefix: p.IndexPrefix + serviceIndex, + spanConverter: dbmodel.NewFromDomain(p.AllTagsAsFields, p.TagKeysAsFields, p.TagDotReplacement), } } @@ -129,8 +127,7 @@ func (s *SpanWriter) WriteSpan(span *model.Span) error { spanIndexName := indexWithDate(s.spanIndexPrefix, span.StartTime) serviceIndexName := indexWithDate(s.serviceIndexPrefix, span.StartTime) - // Convert model.Span into json.Span - jsonSpan := json.FromDomainEmbedProcess(span) + jsonSpan := s.spanConverter.FromDomainEmbedProcess(span) if err := s.createIndex(serviceIndexName, serviceMapping, jsonSpan); err != nil { return err @@ -153,7 +150,7 @@ func indexWithDate(indexPrefix string, date time.Time) string { return indexPrefix + spanDate } -func (s *SpanWriter) createIndex(indexName string, mapping string, jsonSpan *jModel.Span) error { +func (s *SpanWriter) createIndex(indexName string, mapping string, jsonSpan *dbmodel.Span) error { if !keyInCache(indexName, s.indexCache) { start := time.Now() exists, _ := 
s.client.IndexExists(indexName).Do(s.ctx) // don't need to check the error because the exists variable will be false anyway if there is an error @@ -192,17 +189,15 @@ func (s *SpanWriter) fixMapping(mapping string) string { return mapping } -func (s *SpanWriter) writeService(indexName string, jsonSpan *jModel.Span) { +func (s *SpanWriter) writeService(indexName string, jsonSpan *dbmodel.Span) { s.serviceWriter(indexName, jsonSpan) } -func (s *SpanWriter) writeSpan(indexName string, jsonSpan *jModel.Span) { - elasticSpan := Span{Span: jsonSpan, StartTimeMillis: jsonSpan.StartTime / 1000} // Microseconds to milliseconds - - s.client.Index().Index(indexName).Type(spanType).BodyJson(&elasticSpan).Add() +func (s *SpanWriter) writeSpan(indexName string, jsonSpan *dbmodel.Span) { + s.client.Index().Index(indexName).Type(spanType).BodyJson(&jsonSpan).Add() } -func (s *SpanWriter) logError(span *jModel.Span, err error, msg string, logger *zap.Logger) error { +func (s *SpanWriter) logError(span *dbmodel.Span, err error, msg string, logger *zap.Logger) error { logger. With(zap.String("trace_id", string(span.TraceID))). With(zap.String("span_id", string(span.SpanID))). diff --git a/plugin/storage/es/spanstore/writer_test.go b/plugin/storage/es/spanstore/writer_test.go index d8d70a7f08f7..407d70c21271 100644 --- a/plugin/storage/es/spanstore/writer_test.go +++ b/plugin/storage/es/spanstore/writer_test.go @@ -27,9 +27,9 @@ import ( "go.uber.org/zap" "github.com/jaegertracing/jaeger/model" - "github.com/jaegertracing/jaeger/model/json" "github.com/jaegertracing/jaeger/pkg/es/mocks" "github.com/jaegertracing/jaeger/pkg/testutils" + "github.com/jaegertracing/jaeger/plugin/storage/es/spanstore/dbmodel" "github.com/jaegertracing/jaeger/storage/spanstore" ) @@ -48,7 +48,7 @@ func withSpanWriter(fn func(w *spanWriterTest)) { client: client, logger: logger, logBuffer: logBuffer, - writer: NewSpanWriter(client, logger, metricsFactory, 0, 0, ""), + writer: NewSpanWriter(SpanWriterParams{Client: client, Logger: logger, MetricsFactory: metricsFactory}), } fn(w) } @@ -68,7 +68,8 @@ func TestNewSpanWriterIndexPrefix(t *testing.T) { logger, _ := testutils.NewLogger() metricsFactory := metrics.NewLocalFactory(0) for _, testCase := range testCases { - w := NewSpanWriter(client, logger, metricsFactory, 0, 0, testCase.prefix) + w := NewSpanWriter(SpanWriterParams{Client: client, Logger: logger, MetricsFactory: metricsFactory, + IndexPrefix: testCase.prefix}) assert.Equal(t, testCase.expected+spanIndex, w.spanIndexPrefix) assert.Equal(t, testCase.expected+serviceIndex, w.serviceIndexPrefix) } @@ -184,7 +185,7 @@ func TestSpanWriter_WriteSpan(t *testing.T) { indexServicePut.On("Add") indexSpanPut.On("Id", mock.AnythingOfType("string")).Return(indexSpanPut) - indexSpanPut.On("BodyJson", mock.AnythingOfType("*spanstore.Span")).Return(indexSpanPut) + indexSpanPut.On("BodyJson", mock.AnythingOfType("**dbmodel.Span")).Return(indexSpanPut) indexSpanPut.On("Add") w.client.On("IndexExists", stringMatcher(spanIndexName)).Return(spanExistsService) @@ -272,12 +273,12 @@ func TestWriteSpanInternal(t *testing.T) { indexName := "jaeger-1995-04-21" indexService.On("Index", stringMatcher(indexName)).Return(indexService) indexService.On("Type", stringMatcher(spanType)).Return(indexService) - indexService.On("BodyJson", mock.AnythingOfType("*spanstore.Span")).Return(indexService) + indexService.On("BodyJson", mock.AnythingOfType("**dbmodel.Span")).Return(indexService) indexService.On("Add") 
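With both constructors now taking a single options struct, call sites read as a flat list of named parameters (the Elasticsearch integration test further down does exactly this). A hypothetical wiring of a writer/reader pair; the prefix, lookback and `@` replacement are placeholder values, and only fields visible in this diff are set:

```
package example

import (
	"time"

	"github.com/uber/jaeger-lib/metrics"
	"go.uber.org/zap"

	"github.com/jaegertracing/jaeger/pkg/es"
	esSpanstore "github.com/jaegertracing/jaeger/plugin/storage/es/spanstore"
	"github.com/jaegertracing/jaeger/storage/spanstore"
)

// newESStores sketches how the reworked constructors are called; the concrete
// configuration values are placeholders, not recommendations.
func newESStores(client es.Client, logger *zap.Logger, factory metrics.Factory) (*esSpanstore.SpanWriter, spanstore.Reader) {
	writer := esSpanstore.NewSpanWriter(esSpanstore.SpanWriterParams{
		Client:            client,
		Logger:            logger,
		MetricsFactory:    factory,
		IndexPrefix:       "my-prefix",
		AllTagsAsFields:   true, // store every tag as an object field under "tag"
		TagDotReplacement: "@",
	})
	reader := esSpanstore.NewSpanReader(esSpanstore.SpanReaderParams{
		Client:            client,
		Logger:            logger,
		MetricsFactory:    factory,
		MaxLookback:       72 * time.Hour,
		IndexPrefix:       "my-prefix",
		TagDotReplacement: "@",
	})
	return writer, reader
}
```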
w.client.On("Index").Return(indexService) - jsonSpan := &json.Span{} + jsonSpan := &dbmodel.Span{} w.writer.writeSpan(indexName, jsonSpan) indexService.AssertNumberOfCalls(t, "Add", 1) @@ -292,14 +293,14 @@ func TestWriteSpanInternalError(t *testing.T) { indexName := "jaeger-1995-04-21" indexService.On("Index", stringMatcher(indexName)).Return(indexService) indexService.On("Type", stringMatcher(spanType)).Return(indexService) - indexService.On("BodyJson", mock.AnythingOfType("*spanstore.Span")).Return(indexService) + indexService.On("BodyJson", mock.AnythingOfType("**dbmodel.Span")).Return(indexService) indexService.On("Add") w.client.On("Index").Return(indexService) - jsonSpan := &json.Span{ - TraceID: json.TraceID("1"), - SpanID: json.SpanID("0"), + jsonSpan := &dbmodel.Span{ + TraceID: dbmodel.TraceID("1"), + SpanID: dbmodel.SpanID("0"), } w.writer.writeSpan(indexName, jsonSpan) @@ -307,6 +308,59 @@ func TestWriteSpanInternalError(t *testing.T) { }) } +func TestNewSpanTags(t *testing.T) { + client := &mocks.Client{} + logger, _ := testutils.NewLogger() + metricsFactory := metrics.NewLocalFactory(0) + testCases := []struct { + writer *SpanWriter + expected dbmodel.Span + name string + }{ + { + writer: NewSpanWriter(SpanWriterParams{Client: client, Logger: logger, MetricsFactory: metricsFactory, + AllTagsAsFields: true}), + expected: dbmodel.Span{Tag: map[string]interface{}{"foo": "bar"}, Tags: []dbmodel.KeyValue{}, + Process: dbmodel.Process{Tag: map[string]interface{}{"bar": "baz"}, Tags: []dbmodel.KeyValue{}}}, + name: "allTagsAsFields", + }, + { + writer: NewSpanWriter(SpanWriterParams{Client: client, Logger: logger, MetricsFactory: metricsFactory, + TagKeysAsFields: []string{"foo", "bar", "rere"}}), + expected: dbmodel.Span{Tag: map[string]interface{}{"foo": "bar"}, Tags: []dbmodel.KeyValue{}, + Process: dbmodel.Process{Tag: map[string]interface{}{"bar": "baz"}, Tags: []dbmodel.KeyValue{}}}, + name: "definedTagNames", + }, + { + writer: NewSpanWriter(SpanWriterParams{Client: client, Logger: logger, MetricsFactory: metricsFactory}), + expected: dbmodel.Span{ + Tags: []dbmodel.KeyValue{{ + Key: "foo", + Type: dbmodel.StringType, + Value: "bar", + }}, + Process: dbmodel.Process{Tags: []dbmodel.KeyValue{{ + Key: "bar", + Type: dbmodel.StringType, + Value: "baz", + }}}}, + name: "noAllTagsAsFields", + }, + } + + s := &model.Span{Tags: []model.KeyValue{{Key: "foo", VStr: "bar"}}, + Process: &model.Process{Tags: []model.KeyValue{{Key: "bar", VStr: "baz"}}}} + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + mSpan := test.writer.spanConverter.FromDomainEmbedProcess(s) + assert.Equal(t, test.expected.Tag, mSpan.Tag) + assert.Equal(t, test.expected.Tags, mSpan.Tags) + assert.Equal(t, test.expected.Process.Tag, mSpan.Process.Tag) + assert.Equal(t, test.expected.Process.Tags, mSpan.Process.Tags) + }) + } +} + // stringMatcher can match a string argument when it contains a specific substring q func stringMatcher(q string) interface{} { matchFunc := func(s string) bool { diff --git a/plugin/storage/integration/domain_trace_compare_test.go b/plugin/storage/integration/domain_trace_compare_test.go index 3e880f7e00dd..c9a784e5c798 100644 --- a/plugin/storage/integration/domain_trace_compare_test.go +++ b/plugin/storage/integration/domain_trace_compare_test.go @@ -37,8 +37,11 @@ func CompareSliceOfTraces(t *testing.T, expected []*model.Trace, actual []*model t.Log(err) } out, err := json.Marshal(actual) + out2, err2 := json.Marshal(expected) assert.NoError(t, err) + 
assert.NoError(t, err2) t.Logf("Actual traces: %s", string(out)) + t.Logf("Expected traces: %s", string(out2)) } } diff --git a/plugin/storage/integration/elasticsearch_test.go b/plugin/storage/integration/elasticsearch_test.go index c24a6c970132..f9710d667c0e 100644 --- a/plugin/storage/integration/elasticsearch_test.go +++ b/plugin/storage/integration/elasticsearch_test.go @@ -34,13 +34,14 @@ import ( ) const ( - host = "0.0.0.0" - queryPort = "9200" - queryHostPort = host + ":" + queryPort - queryURL = "http://" + queryHostPort - username = "elastic" // the elasticsearch default username - password = "changeme" // the elasticsearch default password - indexPrefix = "integration-test" + host = "0.0.0.0" + queryPort = "9200" + queryHostPort = host + ":" + queryPort + queryURL = "http://" + queryHostPort + username = "elastic" // the elasticsearch default username + password = "changeme" // the elasticsearch default password + indexPrefix = "integration-test" + tagKeyDeDotChar = "@" ) type ESStorageIntegration struct { @@ -51,7 +52,7 @@ type ESStorageIntegration struct { logger *zap.Logger } -func (s *ESStorageIntegration) initializeES() error { +func (s *ESStorageIntegration) initializeES(allTagsAsFields bool) error { rawClient, err := elastic.NewClient( elastic.SetURL(queryURL), elastic.SetBasicAuth(username, password), @@ -68,24 +69,40 @@ func (s *ESStorageIntegration) initializeES() error { dependencyStore := dependencystore.NewDependencyStore(client, s.logger, indexPrefix) s.DependencyReader = dependencyStore s.DependencyWriter = dependencyStore - s.initSpanstore() - s.CleanUp = s.esCleanUp + s.initSpanstore(allTagsAsFields) + s.CleanUp = func() error { + return s.esCleanUp(allTagsAsFields) + } s.Refresh = s.esRefresh - s.esCleanUp() + s.esCleanUp(allTagsAsFields) return nil } -func (s *ESStorageIntegration) esCleanUp() error { +func (s *ESStorageIntegration) esCleanUp(allTagsAsFields bool) error { _, err := s.client.DeleteIndex("*").Do(context.Background()) - s.initSpanstore() + s.initSpanstore(allTagsAsFields) return err } -func (s *ESStorageIntegration) initSpanstore() { +func (s *ESStorageIntegration) initSpanstore(allTagsAsFields bool) { bp, _ := s.client.BulkProcessor().BulkActions(1).FlushInterval(time.Nanosecond).Do(context.Background()) client := es.WrapESClient(s.client, bp) - s.SpanWriter = spanstore.NewSpanWriter(client, s.logger, metrics.NullFactory, 0, 0, indexPrefix) - s.SpanReader = spanstore.NewSpanReader(client, s.logger, 72*time.Hour, metrics.NullFactory, indexPrefix) + s.SpanWriter = spanstore.NewSpanWriter( + spanstore.SpanWriterParams{ + Client: client, + Logger: s.logger, + MetricsFactory: metrics.NullFactory, + IndexPrefix: indexPrefix, + AllTagsAsFields: allTagsAsFields, + TagDotReplacement: tagKeyDeDotChar, + }) + s.SpanReader = spanstore.NewSpanReader(spanstore.SpanReaderParams{ + Client: client, + Logger: s.logger, + MetricsFactory: metrics.NullFactory, + IndexPrefix: indexPrefix, + TagDotReplacement: tagKeyDeDotChar, + }) } func (s *ESStorageIntegration) esRefresh() error { @@ -107,7 +124,7 @@ func healthCheck() error { return errors.New("elastic search is not ready") } -func TestElasticsearchStorage(t *testing.T) { +func testElasticsearchStorage(t *testing.T, allTagsAsFields bool) { if os.Getenv("STORAGE") != "elasticsearch" { t.Skip("Integration test against ElasticSearch skipped; set STORAGE env var to elasticsearch to run this") } @@ -115,6 +132,14 @@ func TestElasticsearchStorage(t *testing.T) { t.Fatal(err) } s := &ESStorageIntegration{} - 
require.NoError(t, s.initializeES()) + require.NoError(t, s.initializeES(allTagsAsFields)) s.IntegrationTestAll(t) } + +func TestElasticsearchStorage(t *testing.T) { + testElasticsearchStorage(t, false) +} + +func TestElasticsearchStorageAllTagsAsObjectFields(t *testing.T) { + testElasticsearchStorage(t, true) +} diff --git a/plugin/storage/integration/integration_test.go b/plugin/storage/integration/integration_test.go index 1700e55e1b9f..fffd02653115 100644 --- a/plugin/storage/integration/integration_test.go +++ b/plugin/storage/integration/integration_test.go @@ -16,6 +16,7 @@ package integration import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -114,7 +115,7 @@ func (s *StorageIntegration) testGetServices(t *testing.T) { var actual []string found := s.waitForCondition(t, func(t *testing.T) bool { - actual, err := s.SpanReader.GetServices() + actual, err := s.SpanReader.GetServices(context.Background()) require.NoError(t, err) return assert.ObjectsAreEqualValues(expected, actual) }) @@ -136,7 +137,7 @@ func (s *StorageIntegration) testGetLargeSpan(t *testing.T) { var actual *model.Trace found := s.waitForCondition(t, func(t *testing.T) bool { var err error - actual, err = s.SpanReader.GetTrace(expectedTraceID) + actual, err = s.SpanReader.GetTrace(context.Background(), expectedTraceID) return err == nil && len(actual.Spans) == len(expected.Spans) }) if !assert.True(t, found) { @@ -154,7 +155,7 @@ func (s *StorageIntegration) testGetOperations(t *testing.T) { var actual []string found := s.waitForCondition(t, func(t *testing.T) bool { var err error - actual, err = s.SpanReader.GetOperations("example-service-1") + actual, err = s.SpanReader.GetOperations(context.Background(), "example-service-1") require.NoError(t, err) return assert.ObjectsAreEqualValues(expected, actual) }) @@ -175,7 +176,7 @@ func (s *StorageIntegration) testGetTrace(t *testing.T) { var actual *model.Trace found := s.waitForCondition(t, func(t *testing.T) bool { var err error - actual, err = s.SpanReader.GetTrace(expectedTraceID) + actual, err = s.SpanReader.GetTrace(context.Background(), expectedTraceID) if err != nil { t.Log(err) } @@ -224,7 +225,7 @@ func (s *StorageIntegration) findTracesByQuery(t *testing.T, query *spanstore.Tr var traces []*model.Trace found := s.waitForCondition(t, func(t *testing.T) bool { var err error - traces, err = s.SpanReader.FindTraces(query) + traces, err = s.SpanReader.FindTraces(context.Background(), query) if err == nil && tracesMatch(t, traces, expected) { return true } diff --git a/plugin/storage/integration/kafka_test.go b/plugin/storage/integration/kafka_test.go index 0d8c539bfbbd..479fed117660 100644 --- a/plugin/storage/integration/kafka_test.go +++ b/plugin/storage/integration/kafka_test.go @@ -15,6 +15,7 @@ package integration import ( + "context" "os" "strconv" "testing" @@ -96,19 +97,19 @@ type ingester struct { traceStore *memory.Store } -func (r *ingester) GetTrace(traceID model.TraceID) (*model.Trace, error) { - return r.traceStore.GetTrace(traceID) +func (r *ingester) GetTrace(ctx context.Context, traceID model.TraceID) (*model.Trace, error) { + return r.traceStore.GetTrace(ctx, traceID) } -func (r *ingester) GetServices() ([]string, error) { +func (r *ingester) GetServices(ctx context.Context) ([]string, error) { return nil, nil } -func (r *ingester) GetOperations(service string) ([]string, error) { +func (r *ingester) GetOperations(ctx context.Context, service string) ([]string, error) { return nil, nil } -func (r *ingester) FindTraces(query 
*spanstore.TraceQueryParameters) ([]*model.Trace, error) { +func (r *ingester) FindTraces(ctx context.Context, query *spanstore.TraceQueryParameters) ([]*model.Trace, error) { return nil, nil } diff --git a/plugin/storage/kafka/options.go b/plugin/storage/kafka/options.go index e93ea8a75f99..e1f6f3a57e2f 100644 --- a/plugin/storage/kafka/options.go +++ b/plugin/storage/kafka/options.go @@ -50,15 +50,15 @@ func (opt *Options) AddFlags(flagSet *flag.FlagSet) { flagSet.String( configPrefix+suffixBrokers, defaultBroker, - "The comma-separated list of kafka brokers. i.e. '127.0.0.1:9092,0.0.0:1234'") + "(experimental) The comma-separated list of kafka brokers. i.e. '127.0.0.1:9092,0.0.0:1234'") flagSet.String( configPrefix+suffixTopic, defaultTopic, - "The name of the kafka topic") + "(experimental) The name of the kafka topic") flagSet.String( configPrefix+suffixEncoding, defaultEncoding, - fmt.Sprintf(`Encoding of spans ("%s" or "%s") sent to kafka.`, encodingProto, encodingJSON), + fmt.Sprintf(`(experimental) Encoding of spans ("%s" or "%s") sent to kafka.`, encodingProto, encodingJSON), ) } diff --git a/plugin/storage/memory/memory.go b/plugin/storage/memory/memory.go index c8fa9a06b63c..e82c7210c092 100644 --- a/plugin/storage/memory/memory.go +++ b/plugin/storage/memory/memory.go @@ -15,6 +15,7 @@ package memory import ( + "context" "errors" "sync" "time" @@ -146,7 +147,7 @@ func (m *Store) WriteSpan(span *model.Span) error { } // GetTrace gets a trace -func (m *Store) GetTrace(traceID model.TraceID) (*model.Trace, error) { +func (m *Store) GetTrace(ctx context.Context, traceID model.TraceID) (*model.Trace, error) { m.RLock() defer m.RUnlock() retMe := m.traces[traceID] @@ -157,7 +158,7 @@ func (m *Store) GetTrace(traceID model.TraceID) (*model.Trace, error) { } // GetServices returns a list of all known services -func (m *Store) GetServices() ([]string, error) { +func (m *Store) GetServices(ctx context.Context) ([]string, error) { m.RLock() defer m.RUnlock() var retMe []string @@ -168,7 +169,7 @@ func (m *Store) GetServices() ([]string, error) { } // GetOperations returns the operations of a given service -func (m *Store) GetOperations(service string) ([]string, error) { +func (m *Store) GetOperations(ctx context.Context, service string) ([]string, error) { m.RLock() defer m.RUnlock() if operations, ok := m.operations[service]; ok { @@ -182,7 +183,7 @@ func (m *Store) GetOperations(service string) ([]string, error) { } // FindTraces returns all traces in the query parameters are satisfied by a trace's span -func (m *Store) FindTraces(query *spanstore.TraceQueryParameters) ([]*model.Trace, error) { +func (m *Store) FindTraces(ctx context.Context, query *spanstore.TraceQueryParameters) ([]*model.Trace, error) { m.RLock() defer m.RUnlock() var retMe []*model.Trace diff --git a/plugin/storage/memory/memory_test.go b/plugin/storage/memory/memory_test.go index e9e0a9a38137..f5e10e258c84 100644 --- a/plugin/storage/memory/memory_test.go +++ b/plugin/storage/memory/memory_test.go @@ -15,6 +15,7 @@ package memory import ( + "context" "testing" "time" @@ -198,7 +199,7 @@ func TestStoreWithLimit(t *testing.T) { func TestStoreGetTraceSuccess(t *testing.T) { withPopulatedMemoryStore(func(store *Store) { - trace, err := store.GetTrace(testingSpan.TraceID) + trace, err := store.GetTrace(context.Background(), testingSpan.TraceID) assert.NoError(t, err) assert.Len(t, trace.Spans, 1) assert.Equal(t, testingSpan, trace.Spans[0]) @@ -207,7 +208,7 @@ func TestStoreGetTraceSuccess(t *testing.T) { func 
TestStoreGetTraceFailure(t *testing.T) { withPopulatedMemoryStore(func(store *Store) { - trace, err := store.GetTrace(model.TraceID{}) + trace, err := store.GetTrace(context.Background(), model.TraceID{}) assert.EqualError(t, err, errTraceNotFound.Error()) assert.Nil(t, trace) }) @@ -215,7 +216,7 @@ func TestStoreGetTraceFailure(t *testing.T) { func TestStoreGetServices(t *testing.T) { withPopulatedMemoryStore(func(store *Store) { - serviceNames, err := store.GetServices() + serviceNames, err := store.GetServices(context.Background()) assert.NoError(t, err) assert.Len(t, serviceNames, 1) assert.EqualValues(t, testingSpan.Process.ServiceName, serviceNames[0]) @@ -224,7 +225,7 @@ func TestStoreGetServices(t *testing.T) { func TestStoreGetOperationsFound(t *testing.T) { withPopulatedMemoryStore(func(store *Store) { - operations, err := store.GetOperations(testingSpan.Process.ServiceName) + operations, err := store.GetOperations(context.Background(), testingSpan.Process.ServiceName) assert.NoError(t, err) assert.Len(t, operations, 1) assert.EqualValues(t, testingSpan.OperationName, operations[0]) @@ -233,7 +234,7 @@ func TestStoreGetOperationsFound(t *testing.T) { func TestStoreGetOperationsNotFound(t *testing.T) { withPopulatedMemoryStore(func(store *Store) { - operations, err := store.GetOperations("notAService") + operations, err := store.GetOperations(context.Background(), "notAService") assert.NoError(t, err) assert.Len(t, operations, 0) }) @@ -241,7 +242,7 @@ func TestStoreGetOperationsNotFound(t *testing.T) { func TestStoreGetEmptyTraceSet(t *testing.T) { withPopulatedMemoryStore(func(store *Store) { - traces, err := store.FindTraces(&spanstore.TraceQueryParameters{}) + traces, err := store.FindTraces(context.Background(), &spanstore.TraceQueryParameters{}) assert.NoError(t, err) assert.Len(t, traces, 0) }) @@ -313,7 +314,7 @@ func TestStoreGetTrace(t *testing.T) { for _, testS := range testStruct { withPopulatedMemoryStore(func(store *Store) { testS.query.NumTraces = 10 - traces, err := store.FindTraces(testS.query) + traces, err := store.FindTraces(context.Background(), testS.query) assert.NoError(t, err) if testS.traceFound { assert.Len(t, traces, 1) diff --git a/scripts/travis/build-all-in-one-image.sh b/scripts/travis/build-all-in-one-image.sh index 06896b0c69a6..933ef3bba2d7 100755 --- a/scripts/travis/build-all-in-one-image.sh +++ b/scripts/travis/build-all-in-one-image.sh @@ -10,7 +10,7 @@ make build-all-in-one-linux export REPO=jaegertracing/all-in-one -docker build -f cmd/standalone/Dockerfile -t $REPO:latest . +docker build -f cmd/all-in-one/Dockerfile -t $REPO:latest . 
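Returning to the storage API: the `context.Context` argument added throughout the reader methods is accepted but ignored by the in-memory store for now, yet every caller must pass one. A minimal, hypothetical round trip through the updated memory store; the span fields are chosen only to satisfy the store, and `memory.NewStore` and `model.NewProcess` are assumed from the existing packages rather than shown in this diff:

```
package main

import (
	"context"
	"fmt"

	"github.com/jaegertracing/jaeger/model"
	"github.com/jaegertracing/jaeger/plugin/storage/memory"
)

func main() {
	store := memory.NewStore()
	span := &model.Span{
		TraceID:       model.NewTraceID(0, 1),
		OperationName: "example-op",
		Process:       model.NewProcess("example-service", nil),
	}
	if err := store.WriteSpan(span); err != nil {
		panic(err)
	}

	// Every reader call now threads a context through, even though the
	// memory implementation does not use it yet.
	ctx := context.Background()
	services, _ := store.GetServices(ctx)
	trace, _ := store.GetTrace(ctx, span.TraceID)
	fmt.Println(services, len(trace.Spans))
}
```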
export CID=$(docker run -d -p 16686:16686 -p 5778:5778 $REPO:latest) make integration-test docker kill $CID diff --git a/scripts/travis/build-docker-images.sh b/scripts/travis/build-docker-images.sh index 680331a2edd8..8c6491205cf8 100755 --- a/scripts/travis/build-docker-images.sh +++ b/scripts/travis/build-docker-images.sh @@ -23,7 +23,7 @@ nvm use 6 export DOCKER_NAMESPACE=jaegertracing make docker -for component in agent cassandra-schema es-index-cleaner collector query +for component in agent cassandra-schema es-index-cleaner collector query ingester do export REPO="jaegertracing/jaeger-${component}" bash ./scripts/travis/upload-to-docker.sh diff --git a/scripts/travis/package-deploy.sh b/scripts/travis/package-deploy.sh index 5bf02d599784..9eb0e1fb0477 100755 --- a/scripts/travis/package-deploy.sh +++ b/scripts/travis/package-deploy.sh @@ -20,10 +20,11 @@ function stage-platform-files { local PACKAGE_STAGING_DIR=$2 local FILE_EXTENSION=$3 - stage-file ./cmd/standalone/standalone-$PLATFORM $PACKAGE_STAGING_DIR/jaeger-standalone$FILE_EXTENSION + stage-file ./cmd/all-in-one/all-in-one-$PLATFORM $PACKAGE_STAGING_DIR/jaeger-all-in-one$FILE_EXTENSION stage-file ./cmd/agent/agent-$PLATFORM $PACKAGE_STAGING_DIR/jaeger-agent$FILE_EXTENSION stage-file ./cmd/query/query-$PLATFORM $PACKAGE_STAGING_DIR/jaeger-query$FILE_EXTENSION stage-file ./cmd/collector/collector-$PLATFORM $PACKAGE_STAGING_DIR/jaeger-collector$FILE_EXTENSION + stage-file ./cmd/ingester/ingester-$PLATFORM $PACKAGE_STAGING_DIR/jaeger-ingester$FILE_EXTENSION stage-file ./examples/hotrod/hotrod-$PLATFORM $PACKAGE_STAGING_DIR/example-hotrod$FILE_EXTENSION } diff --git a/scripts/travis/upload-to-docker.sh b/scripts/travis/upload-to-docker.sh old mode 100644 new mode 100755 index ba2df97c0e16..448a76608889 --- a/scripts/travis/upload-to-docker.sh +++ b/scripts/travis/upload-to-docker.sh @@ -36,3 +36,8 @@ set -x docker login -u $DOCKER_USER -p $DOCKER_PASS # push all tags, therefore push to repo docker push $REPO + +SNAPSHOT_IMAGE="$REPO-snapshot:$TRAVIS_COMMIT" +echo "Pushing snapshot image $SNAPSHOT_IMAGE" +docker tag $IMAGE $SNAPSHOT_IMAGE +docker push $SNAPSHOT_IMAGE diff --git a/storage/samplingstore/mocks/Store.go b/storage/samplingstore/mocks/Store.go index c2732080c415..3270df5f30a8 100644 --- a/storage/samplingstore/mocks/Store.go +++ b/storage/samplingstore/mocks/Store.go @@ -14,10 +14,13 @@ package mocks -import "github.com/jaegertracing/jaeger/cmd/collector/app/sampling/model" -import "github.com/stretchr/testify/mock" +import ( + "time" -import "time" + "github.com/stretchr/testify/mock" + + "github.com/jaegertracing/jaeger/cmd/collector/app/sampling/model" +) type Store struct { mock.Mock diff --git a/storage/spanstore/interface.go b/storage/spanstore/interface.go index 4b005d53ae84..eee40f1a57df 100644 --- a/storage/spanstore/interface.go +++ b/storage/spanstore/interface.go @@ -15,6 +15,7 @@ package spanstore import ( + "context" "errors" "time" @@ -33,10 +34,10 @@ var ( // Reader finds and loads traces and other data from storage. 
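As shown at the top of this diff, `NewSpanReader` no longer wraps itself in the read-metrics decorator, so the instrumentation is expected to be applied by whatever assembles the storage, which is not part of this diff. A sketch of that wrapping, using the decorator reworked further below; the helper name is illustrative:

```
package example

import (
	"github.com/uber/jaeger-lib/metrics"

	"github.com/jaegertracing/jaeger/storage/spanstore"
	storageMetrics "github.com/jaegertracing/jaeger/storage/spanstore/metrics"
)

// instrument wraps any spanstore.Reader so that every GetTrace, GetServices,
// GetOperations and FindTraces call is counted and timed with a "result" tag.
func instrument(reader spanstore.Reader, factory metrics.Factory) spanstore.Reader {
	return storageMetrics.NewReadMetricsDecorator(reader, factory)
}
```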
type Reader interface { - GetTrace(traceID model.TraceID) (*model.Trace, error) - GetServices() ([]string, error) - GetOperations(service string) ([]string, error) - FindTraces(query *TraceQueryParameters) ([]*model.Trace, error) + GetTrace(ctx context.Context, traceID model.TraceID) (*model.Trace, error) + GetServices(ctx context.Context) ([]string, error) + GetOperations(ctx context.Context, service string) ([]string, error) + FindTraces(ctx context.Context, query *TraceQueryParameters) ([]*model.Trace, error) } // TraceQueryParameters contains parameters of a trace query. diff --git a/storage/spanstore/metrics/decorator.go b/storage/spanstore/metrics/decorator.go index f82f0cb3139f..c013ddda5b51 100644 --- a/storage/spanstore/metrics/decorator.go +++ b/storage/spanstore/metrics/decorator.go @@ -15,6 +15,7 @@ package metrics import ( + "context" "time" "github.com/uber/jaeger-lib/metrics" @@ -33,16 +34,14 @@ type ReadMetricsDecorator struct { } type queryMetrics struct { - Errors metrics.Counter `metric:"errors"` - Attempts metrics.Counter `metric:"attempts"` - Successes metrics.Counter `metric:"successes"` - Responses metrics.Timer `metric:"responses"` //used as a histogram, not necessary for GetTrace - ErrLatency metrics.Timer `metric:"errLatency"` - OKLatency metrics.Timer `metric:"okLatency"` + Errors metrics.Counter + Successes metrics.Counter + Responses metrics.Timer //used as a histogram, not necessary for GetTrace + ErrLatency metrics.Timer + OKLatency metrics.Timer } func (q *queryMetrics) emit(err error, latency time.Duration, responses int) { - q.Attempts.Inc(1) if err != nil { q.Errors.Inc(1) q.ErrLatency.Record(latency) @@ -65,40 +64,45 @@ func NewReadMetricsDecorator(spanReader spanstore.Reader, metricsFactory metrics } func buildQueryMetrics(namespace string, metricsFactory metrics.Factory) *queryMetrics { - qMetrics := &queryMetrics{} scoped := metricsFactory.Namespace(namespace, nil) - metrics.Init(qMetrics, scoped, nil) + qMetrics := &queryMetrics{ + Errors: scoped.Counter("", map[string]string{"result": "err"}), + Successes: scoped.Counter("", map[string]string{"result": "ok"}), + Responses: scoped.Timer("responses", nil), + ErrLatency: scoped.Timer("latency", map[string]string{"result": "err"}), + OKLatency: scoped.Timer("latency", map[string]string{"result": "ok"}), + } return qMetrics } // FindTraces implements spanstore.Reader#FindTraces -func (m *ReadMetricsDecorator) FindTraces(traceQuery *spanstore.TraceQueryParameters) ([]*model.Trace, error) { +func (m *ReadMetricsDecorator) FindTraces(ctx context.Context, traceQuery *spanstore.TraceQueryParameters) ([]*model.Trace, error) { start := time.Now() - retMe, err := m.spanReader.FindTraces(traceQuery) + retMe, err := m.spanReader.FindTraces(ctx, traceQuery) m.findTracesMetrics.emit(err, time.Since(start), len(retMe)) return retMe, err } // GetTrace implements spanstore.Reader#GetTrace -func (m *ReadMetricsDecorator) GetTrace(traceID model.TraceID) (*model.Trace, error) { +func (m *ReadMetricsDecorator) GetTrace(ctx context.Context, traceID model.TraceID) (*model.Trace, error) { start := time.Now() - retMe, err := m.spanReader.GetTrace(traceID) + retMe, err := m.spanReader.GetTrace(ctx, traceID) m.getTraceMetrics.emit(err, time.Since(start), 1) return retMe, err } // GetServices implements spanstore.Reader#GetServices -func (m *ReadMetricsDecorator) GetServices() ([]string, error) { +func (m *ReadMetricsDecorator) GetServices(ctx context.Context) ([]string, error) { start := time.Now() - retMe, err := 
m.spanReader.GetServices() + retMe, err := m.spanReader.GetServices(ctx) m.getServicesMetrics.emit(err, time.Since(start), len(retMe)) return retMe, err } // GetOperations implements spanstore.Reader#GetOperations -func (m *ReadMetricsDecorator) GetOperations(service string) ([]string, error) { +func (m *ReadMetricsDecorator) GetOperations(ctx context.Context, service string) ([]string, error) { start := time.Now() - retMe, err := m.spanReader.GetOperations(service) + retMe, err := m.spanReader.GetOperations(ctx, service) m.getOperationsMetrics.emit(err, time.Since(start), len(retMe)) return retMe, err } diff --git a/storage/spanstore/metrics/decorator_test.go b/storage/spanstore/metrics/decorator_test.go index dbc68d974197..0fe9f352fe45 100644 --- a/storage/spanstore/metrics/decorator_test.go +++ b/storage/spanstore/metrics/decorator_test.go @@ -15,6 +15,7 @@ package metrics_test import ( + "context" "errors" "testing" @@ -33,36 +34,32 @@ func TestSuccessfulUnderlyingCalls(t *testing.T) { mockReader := mocks.Reader{} mrs := NewReadMetricsDecorator(&mockReader, mf) mockReader.On("GetServices").Return([]string{}, nil) - mrs.GetServices() + mrs.GetServices(context.Background()) mockReader.On("GetOperations", "something").Return([]string{}, nil) - mrs.GetOperations("something") + mrs.GetOperations(context.Background(), "something") mockReader.On("GetTrace", model.TraceID{}).Return(&model.Trace{}, nil) - mrs.GetTrace(model.TraceID{}) + mrs.GetTrace(context.Background(), model.TraceID{}) mockReader.On("FindTraces", &spanstore.TraceQueryParameters{}).Return([]*model.Trace{}, nil) - mrs.FindTraces(&spanstore.TraceQueryParameters{}) + mrs.FindTraces(context.Background(), &spanstore.TraceQueryParameters{}) counters, gauges := mf.Snapshot() expecteds := map[string]int64{ - "get_operations.attempts": 1, - "get_operations.successes": 1, - "get_operations.errors": 0, - "get_trace.attempts": 1, - "get_trace.successes": 1, - "get_trace.errors": 0, - "find_traces.attempts": 1, - "find_traces.successes": 1, - "find_traces.errors": 0, - "get_services.attempts": 1, - "get_services.successes": 1, - "get_services.errors": 0, + "get_operations|result=ok": 1, + "get_operations|result=err": 0, + "get_trace|result=ok": 1, + "get_trace|result=err": 0, + "find_traces|result=ok": 1, + "find_traces|result=err": 0, + "get_services|result=ok": 1, + "get_services|result=err": 0, } existingKeys := []string{ - "get_operations.okLatency.P50", + "get_operations.latency|result=ok.P50", "get_trace.responses.P50", - "find_traces.okLatency.P50", // this is not exhaustive + "find_traces.latency|result=ok.P50", // this is not exhaustive } nonExistentKeys := []string{ - "get_operations.errLatency.P50", + "get_operations.latency|result=err.P50", } checkExpectedExistingAndNonExistentCounters(t, counters, expecteds, gauges, existingKeys, nonExistentKeys) @@ -90,37 +87,33 @@ func TestFailingUnderlyingCalls(t *testing.T) { mockReader := mocks.Reader{} mrs := NewReadMetricsDecorator(&mockReader, mf) mockReader.On("GetServices").Return(nil, errors.New("Failure")) - mrs.GetServices() + mrs.GetServices(context.Background()) mockReader.On("GetOperations", "something").Return(nil, errors.New("Failure")) - mrs.GetOperations("something") + mrs.GetOperations(context.Background(), "something") mockReader.On("GetTrace", model.TraceID{}).Return(nil, errors.New("Failure")) - mrs.GetTrace(model.TraceID{}) + mrs.GetTrace(context.Background(), model.TraceID{}) mockReader.On("FindTraces", &spanstore.TraceQueryParameters{}).Return(nil, 
errors.New("Failure")) - mrs.FindTraces(&spanstore.TraceQueryParameters{}) + mrs.FindTraces(context.Background(), &spanstore.TraceQueryParameters{}) counters, gauges := mf.Snapshot() expecteds := map[string]int64{ - "get_operations.attempts": 1, - "get_operations.successes": 0, - "get_operations.errors": 1, - "get_trace.attempts": 1, - "get_trace.successes": 0, - "get_trace.errors": 1, - "find_traces.attempts": 1, - "find_traces.successes": 0, - "find_traces.errors": 1, - "get_services.attempts": 1, - "get_services.successes": 0, - "get_services.errors": 1, + "get_operations|result=ok": 0, + "get_operations|result=err": 1, + "get_trace|result=ok": 0, + "get_trace|result=err": 1, + "find_traces|result=ok": 0, + "find_traces|result=err": 1, + "get_services|result=ok": 0, + "get_services|result=err": 1, } existingKeys := []string{ - "get_operations.errLatency.P50", + "get_operations.latency|result=err.P50", } nonExistentKeys := []string{ - "get_operations.okLatency.P50", + "get_operations.latency|result=ok.P50", "get_trace.responses.P50", - "query.okLatency.P50", // this is not exhaustive + "query.latency|result=ok.P50", // this is not exhaustive } checkExpectedExistingAndNonExistentCounters(t, counters, expecteds, gauges, existingKeys, nonExistentKeys) diff --git a/storage/spanstore/mocks/Reader.go b/storage/spanstore/mocks/Reader.go index 236eef8bc7ca..7653970609c6 100644 --- a/storage/spanstore/mocks/Reader.go +++ b/storage/spanstore/mocks/Reader.go @@ -14,9 +14,14 @@ package mocks -import mock "github.com/stretchr/testify/mock" -import model "github.com/jaegertracing/jaeger/model" -import spanstore "github.com/jaegertracing/jaeger/storage/spanstore" +import ( + "context" + + mock "github.com/stretchr/testify/mock" + + model "github.com/jaegertracing/jaeger/model" + spanstore "github.com/jaegertracing/jaeger/storage/spanstore" +) // Reader is an autogenerated mock type for the Reader type type Reader struct { @@ -24,7 +29,7 @@ type Reader struct { } // FindTraces provides a mock function with given fields: query -func (_m *Reader) FindTraces(query *spanstore.TraceQueryParameters) ([]*model.Trace, error) { +func (_m *Reader) FindTraces(ctx context.Context, query *spanstore.TraceQueryParameters) ([]*model.Trace, error) { ret := _m.Called(query) var r0 []*model.Trace @@ -47,7 +52,7 @@ func (_m *Reader) FindTraces(query *spanstore.TraceQueryParameters) ([]*model.Tr } // GetOperations provides a mock function with given fields: service -func (_m *Reader) GetOperations(service string) ([]string, error) { +func (_m *Reader) GetOperations(ctx context.Context, service string) ([]string, error) { ret := _m.Called(service) var r0 []string @@ -70,7 +75,7 @@ func (_m *Reader) GetOperations(service string) ([]string, error) { } // GetServices provides a mock function with given fields: -func (_m *Reader) GetServices() ([]string, error) { +func (_m *Reader) GetServices(ctx context.Context) ([]string, error) { ret := _m.Called() var r0 []string @@ -93,7 +98,7 @@ func (_m *Reader) GetServices() ([]string, error) { } // GetTrace provides a mock function with given fields: traceID -func (_m *Reader) GetTrace(traceID model.TraceID) (*model.Trace, error) { +func (_m *Reader) GetTrace(ctx context.Context, traceID model.TraceID) (*model.Trace, error) { ret := _m.Called(traceID) var r0 *model.Trace diff --git a/thrift-gen/agent/agent.go b/thrift-gen/agent/agent.go index 874b14723aa2..7ce6521d51d6 100644 --- a/thrift-gen/agent/agent.go +++ b/thrift-gen/agent/agent.go 
@@ -6,6 +6,7 @@ package agent import ( "bytes" "fmt" + "github.com/apache/thrift/lib/go/thrift" "github.com/jaegertracing/jaeger/thrift-gen/jaeger" "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" diff --git a/thrift-gen/agent/constants.go b/thrift-gen/agent/constants.go index 3cfe4787f7fb..5c3ebdca92b6 100644 --- a/thrift-gen/agent/constants.go +++ b/thrift-gen/agent/constants.go @@ -6,6 +6,7 @@ package agent import ( "bytes" "fmt" + "github.com/apache/thrift/lib/go/thrift" "github.com/jaegertracing/jaeger/thrift-gen/jaeger" "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" diff --git a/thrift-gen/agent/ttypes.go b/thrift-gen/agent/ttypes.go index 7ad797b0480b..5f27c397147d 100644 --- a/thrift-gen/agent/ttypes.go +++ b/thrift-gen/agent/ttypes.go @@ -6,6 +6,7 @@ package agent import ( "bytes" "fmt" + "github.com/apache/thrift/lib/go/thrift" "github.com/jaegertracing/jaeger/thrift-gen/jaeger" "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" diff --git a/thrift-gen/baggage/baggagerestrictionmanager.go b/thrift-gen/baggage/baggagerestrictionmanager.go index 1931cd72c488..69bc8841b396 100644 --- a/thrift-gen/baggage/baggagerestrictionmanager.go +++ b/thrift-gen/baggage/baggagerestrictionmanager.go @@ -6,6 +6,7 @@ package baggage import ( "bytes" "fmt" + "github.com/apache/thrift/lib/go/thrift" ) diff --git a/thrift-gen/baggage/constants.go b/thrift-gen/baggage/constants.go index 6668424a59cc..425a621532da 100644 --- a/thrift-gen/baggage/constants.go +++ b/thrift-gen/baggage/constants.go @@ -6,6 +6,7 @@ package baggage import ( "bytes" "fmt" + "github.com/apache/thrift/lib/go/thrift" ) diff --git a/thrift-gen/baggage/ttypes.go b/thrift-gen/baggage/ttypes.go index be442fbd91a9..c0169dd449ea 100644 --- a/thrift-gen/baggage/ttypes.go +++ b/thrift-gen/baggage/ttypes.go @@ -6,6 +6,7 @@ package baggage import ( "bytes" "fmt" + "github.com/apache/thrift/lib/go/thrift" ) diff --git a/thrift-gen/jaeger/agent.go b/thrift-gen/jaeger/agent.go index db6cac9fc190..172dabded006 100644 --- a/thrift-gen/jaeger/agent.go +++ b/thrift-gen/jaeger/agent.go @@ -6,6 +6,7 @@ package jaeger import ( "bytes" "fmt" + "github.com/apache/thrift/lib/go/thrift" ) diff --git a/thrift-gen/jaeger/collector.go b/thrift-gen/jaeger/collector.go index d5750bd9badc..f9bd36f7ce87 100644 --- a/thrift-gen/jaeger/collector.go +++ b/thrift-gen/jaeger/collector.go @@ -6,6 +6,7 @@ package jaeger import ( "bytes" "fmt" + "github.com/apache/thrift/lib/go/thrift" ) diff --git a/thrift-gen/jaeger/constants.go b/thrift-gen/jaeger/constants.go index 250474222a49..414aa673ba99 100644 --- a/thrift-gen/jaeger/constants.go +++ b/thrift-gen/jaeger/constants.go @@ -6,6 +6,7 @@ package jaeger import ( "bytes" "fmt" + "github.com/apache/thrift/lib/go/thrift" ) diff --git a/thrift-gen/jaeger/ttypes.go b/thrift-gen/jaeger/ttypes.go index b5ddfa645908..836afc1b5776 100644 --- a/thrift-gen/jaeger/ttypes.go +++ b/thrift-gen/jaeger/ttypes.go @@ -6,6 +6,7 @@ package jaeger import ( "bytes" "fmt" + "github.com/apache/thrift/lib/go/thrift" ) diff --git a/thrift-gen/sampling/constants.go b/thrift-gen/sampling/constants.go index 728988b8387e..1c6ade8e2105 100644 --- a/thrift-gen/sampling/constants.go +++ b/thrift-gen/sampling/constants.go @@ -6,6 +6,7 @@ package sampling import ( "bytes" "fmt" + "github.com/apache/thrift/lib/go/thrift" ) diff --git a/thrift-gen/sampling/samplingmanager.go 
b/thrift-gen/sampling/samplingmanager.go index 563e2b4c9e6a..1c28aaf0e300 100644 --- a/thrift-gen/sampling/samplingmanager.go +++ b/thrift-gen/sampling/samplingmanager.go @@ -6,6 +6,7 @@ package sampling import ( "bytes" "fmt" + "github.com/apache/thrift/lib/go/thrift" ) diff --git a/thrift-gen/sampling/ttypes.go b/thrift-gen/sampling/ttypes.go index 3e831af4a65f..490c4b07dcd4 100644 --- a/thrift-gen/sampling/ttypes.go +++ b/thrift-gen/sampling/ttypes.go @@ -6,6 +6,7 @@ package sampling import ( "bytes" "fmt" + "github.com/apache/thrift/lib/go/thrift" ) diff --git a/thrift-gen/zipkincore/constants.go b/thrift-gen/zipkincore/constants.go index 0f0b6bc03b7e..c84b0fdb3096 100644 --- a/thrift-gen/zipkincore/constants.go +++ b/thrift-gen/zipkincore/constants.go @@ -6,6 +6,7 @@ package zipkincore import ( "bytes" "fmt" + "github.com/apache/thrift/lib/go/thrift" ) diff --git a/thrift-gen/zipkincore/ttypes.go b/thrift-gen/zipkincore/ttypes.go index 8fbf2ad49b29..ed599ae44b94 100644 --- a/thrift-gen/zipkincore/ttypes.go +++ b/thrift-gen/zipkincore/ttypes.go @@ -6,6 +6,7 @@ package zipkincore import ( "bytes" "fmt" + "github.com/apache/thrift/lib/go/thrift" ) diff --git a/thrift-gen/zipkincore/zipkincollector.go b/thrift-gen/zipkincore/zipkincollector.go index bcc54f36efb5..ff35f002bf1d 100644 --- a/thrift-gen/zipkincore/zipkincollector.go +++ b/thrift-gen/zipkincore/zipkincollector.go @@ -6,6 +6,7 @@ package zipkincore import ( "bytes" "fmt" + "github.com/apache/thrift/lib/go/thrift" )
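To make the metrics rework concrete: the decorator now emits one counter and one latency timer per operation, distinguished by a `result` tag, instead of the separate `attempts`/`successes`/`errors` counters. A small sketch against jaeger-lib's local factory, mirroring the expectations in `decorator_test.go`; the printed key format is what those tests assert:

```
package main

import (
	"fmt"
	"time"

	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	mf := metrics.NewLocalFactory(0)

	// Same pattern as buildQueryMetrics: one namespace per operation,
	// counters and timers distinguished by a "result" tag.
	scoped := mf.Namespace("get_trace", nil)
	scoped.Counter("", map[string]string{"result": "ok"}).Inc(1)
	scoped.Timer("latency", map[string]string{"result": "ok"}).Record(5 * time.Millisecond)

	counters, _ := mf.Snapshot()
	fmt.Println(counters) // expected to contain a key like "get_trace|result=ok"
}
```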