From 76db4c043865b6d366dd012c638f931878be72fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juan=20=C3=81lvarez?= Date: Tue, 5 May 2020 07:58:20 +0200 Subject: [PATCH] Cherry-pick #17938 to 7.x: Instrument beat pipeline (#18171) * [APM] Instrument beat pipeline (#17938) This allows beat users to instrument the publishing pipeline by setting ELASTIC_APM_ACTIVE=true in the environment. Co-authored-by: Gil Raphaelli Co-authored-by: Andrew Wilkins --- CHANGELOG-developer.next.asciidoc | 1 + CHANGELOG.next.asciidoc | 1 + NOTICE.txt | 181 +++ go.mod | 3 + go.sum | 15 + libbeat/cmd/instance/beat.go | 10 + .../breaking/breaking-7.8.asciidoc | 25 + .../release-notes/breaking/breaking.asciidoc | 3 + libbeat/esleg/eslegclient/bulkapi.go | 8 + .../eslegclient/bulkapi_integration_test.go | 7 +- .../esleg/eslegclient/bulkapi_mock_test.go | 7 +- libbeat/esleg/eslegclient/connection.go | 8 +- .../monitoring/report/elasticsearch/client.go | 12 +- libbeat/outputs/backoff.go | 5 +- libbeat/outputs/console/console.go | 3 +- libbeat/outputs/console/console_test.go | 3 +- libbeat/outputs/elasticsearch/client.go | 26 +- .../elasticsearch/client_integration_test.go | 48 +- libbeat/outputs/elasticsearch/client_test.go | 3 +- libbeat/outputs/failover.go | 5 +- libbeat/outputs/fileout/file.go | 5 +- libbeat/outputs/kafka/client.go | 3 +- .../outputs/kafka/kafka_integration_test.go | 3 +- libbeat/outputs/logstash/async.go | 3 +- libbeat/outputs/logstash/async_test.go | 3 +- .../logstash/logstash_integration_test.go | 7 +- libbeat/outputs/logstash/logstash_test.go | 3 +- libbeat/outputs/logstash/sync.go | 3 +- libbeat/outputs/logstash/sync_test.go | 3 +- libbeat/outputs/outputs.go | 5 +- libbeat/outputs/redis/backoff.go | 5 +- libbeat/outputs/redis/client.go | 3 +- .../outputs/redis/redis_integration_test.go | 3 +- libbeat/publisher/pipeline/controller.go | 2 +- libbeat/publisher/pipeline/module.go | 3 + libbeat/publisher/pipeline/nilpipeline.go | 4 +- libbeat/publisher/pipeline/output.go | 38 +- 
libbeat/publisher/pipeline/output_test.go | 57 +- libbeat/publisher/pipeline/stress/out.go | 3 +- libbeat/publisher/pipeline/testing.go | 3 +- vendor/github.com/armon/go-radix/.gitignore | 22 + vendor/github.com/armon/go-radix/.travis.yml | 3 + vendor/github.com/armon/go-radix/LICENSE | 20 + vendor/github.com/armon/go-radix/README.md | 38 + vendor/github.com/armon/go-radix/go.mod | 1 + vendor/github.com/armon/go-radix/radix.go | 540 +++++++ .../santhosh-tekuri/jsonschema/.travis.yml | 10 + .../santhosh-tekuri/jsonschema/LICENSE | 27 + .../santhosh-tekuri/jsonschema/README.md | 148 ++ .../santhosh-tekuri/jsonschema/compiler.go | 534 +++++++ .../jsonschema/decoders/decoders.go | 32 + .../santhosh-tekuri/jsonschema/doc.go | 77 + .../santhosh-tekuri/jsonschema/draft4.go | 172 +++ .../santhosh-tekuri/jsonschema/draft6.go | 170 +++ .../santhosh-tekuri/jsonschema/draft7.go | 196 +++ .../santhosh-tekuri/jsonschema/errors.go | 122 ++ .../jsonschema/formats/formats.go | 295 ++++ .../santhosh-tekuri/jsonschema/go.mod | 1 + .../santhosh-tekuri/jsonschema/go.test.sh | 12 + .../jsonschema/loader/loader.go | 105 ++ .../jsonschema/mediatypes/mediatypes.go | 39 + .../santhosh-tekuri/jsonschema/resource.go | 236 +++ .../santhosh-tekuri/jsonschema/schema.go | 558 +++++++ .../github.com/stretchr/testify/suite/doc.go | 65 + .../stretchr/testify/suite/interfaces.go | 46 + .../stretchr/testify/suite/suite.go | 166 +++ vendor/go.elastic.co/apm/.dockerignore | 2 + vendor/go.elastic.co/apm/.gitignore | 5 + vendor/go.elastic.co/apm/.jenkins-edge.yml | 2 + vendor/go.elastic.co/apm/.jenkins.yml | 8 + vendor/go.elastic.co/apm/CHANGELOG.asciidoc | 262 ++++ vendor/go.elastic.co/apm/CHANGELOG.md | 1 + vendor/go.elastic.co/apm/CODE_OF_CONDUCT.md | 3 + vendor/go.elastic.co/apm/CONTRIBUTING.md | 91 ++ vendor/go.elastic.co/apm/Jenkinsfile | 294 ++++ vendor/go.elastic.co/apm/LICENSE | 201 +++ vendor/go.elastic.co/apm/Makefile | 81 + 
vendor/go.elastic.co/apm/NOTICE | 84 ++ vendor/go.elastic.co/apm/README.md | 41 + vendor/go.elastic.co/apm/apmconfig/doc.go | 20 + vendor/go.elastic.co/apm/apmconfig/watcher.go | 54 + .../apm/apmtest/configwatcher.go | 32 + vendor/go.elastic.co/apm/apmtest/discard.go | 52 + vendor/go.elastic.co/apm/apmtest/httpsuite.go | 137 ++ vendor/go.elastic.co/apm/apmtest/recorder.go | 69 + .../go.elastic.co/apm/apmtest/recordlogger.go | 60 + .../go.elastic.co/apm/apmtest/testlogger.go | 45 + .../apm/apmtest/withtransaction.go | 39 + vendor/go.elastic.co/apm/breakdown.go | 365 +++++ vendor/go.elastic.co/apm/builtin_metrics.go | 164 +++ vendor/go.elastic.co/apm/capturebody.go | 198 +++ vendor/go.elastic.co/apm/config.go | 448 ++++++ vendor/go.elastic.co/apm/context.go | 256 ++++ vendor/go.elastic.co/apm/doc.go | 21 + vendor/go.elastic.co/apm/error.go | 696 +++++++++ vendor/go.elastic.co/apm/error_unix.go | 30 + vendor/go.elastic.co/apm/error_windows.go | 27 + vendor/go.elastic.co/apm/fmt.go | 85 ++ vendor/go.elastic.co/apm/fnv.go | 42 + vendor/go.elastic.co/apm/go.mod | 17 + vendor/go.elastic.co/apm/go.sum | 53 + vendor/go.elastic.co/apm/gocontext.go | 138 ++ vendor/go.elastic.co/apm/gofuzz.go | 270 ++++ .../apm/internal/apmcontext/context.go | 78 + .../apm/internal/apmhostutil/container.go | 34 + .../internal/apmhostutil/container_linux.go | 156 ++ .../apmhostutil/container_nonlinux.go | 36 + .../apm/internal/apmhttputil/forwarded.go | 74 + .../apm/internal/apmhttputil/remoteaddr.go | 60 + .../apm/internal/apmhttputil/url.go | 113 ++ .../apm/internal/apmlog/logger.go | 173 +++ .../apm/internal/apmschema/schema.go | 69 + .../apm/internal/apmschema/update.sh | 37 + .../apm/internal/apmstrings/truncate.go | 31 + .../apm/internal/apmversion/version.go | 23 + .../apm/internal/configutil/duration.go | 73 + .../apm/internal/configutil/env.go | 95 ++ .../apm/internal/configutil/list.go | 34 + .../apm/internal/configutil/size.go | 105 ++ .../apm/internal/configutil/wildcards.go | 62 + 
.../go.elastic.co/apm/internal/iochan/doc.go | 19 + .../apm/internal/iochan/reader.go | 110 ++ .../apm/internal/pkgerrorsutil/pkgerrors.go | 60 + .../apm/internal/ringbuffer/buffer.go | 142 ++ .../apm/internal/ringbuffer/doc.go | 22 + .../apm/internal/wildcard/doc.go | 19 + .../apm/internal/wildcard/matcher.go | 142 ++ .../apm/internal/wildcard/matchers.go | 31 + vendor/go.elastic.co/apm/logger.go | 54 + vendor/go.elastic.co/apm/metrics.go | 161 ++ vendor/go.elastic.co/apm/model/doc.go | 21 + vendor/go.elastic.co/apm/model/generate.sh | 4 + vendor/go.elastic.co/apm/model/gofuzz.go | 82 ++ vendor/go.elastic.co/apm/model/maps.go | 48 + vendor/go.elastic.co/apm/model/marshal.go | 639 ++++++++ .../apm/model/marshal_fastjson.go | 1297 +++++++++++++++++ vendor/go.elastic.co/apm/model/model.go | 671 +++++++++ vendor/go.elastic.co/apm/modelwriter.go | 267 ++++ .../apm/module/apmelasticsearch/LICENSE | 201 +++ .../apm/module/apmelasticsearch/client.go | 239 +++ .../apm/module/apmelasticsearch/doc.go | 20 + .../apm/module/apmelasticsearch/go.mod | 14 + .../apm/module/apmelasticsearch/go.sum | 62 + .../module/apmelasticsearch/requestname.go | 39 + .../apmelasticsearch/requestname_go19.go | 30 + .../go.elastic.co/apm/module/apmhttp/LICENSE | 201 +++ .../apm/module/apmhttp/client.go | 200 +++ .../apm/module/apmhttp/context.go | 40 + .../go.elastic.co/apm/module/apmhttp/doc.go | 20 + .../go.elastic.co/apm/module/apmhttp/go.mod | 13 + .../go.elastic.co/apm/module/apmhttp/go.sum | 62 + .../apm/module/apmhttp/handler.go | 330 +++++ .../apm/module/apmhttp/ignorer.go | 81 + .../apm/module/apmhttp/recovery.go | 60 + .../apm/module/apmhttp/requestname.go | 56 + .../apm/module/apmhttp/requestname_go19.go | 46 + .../apm/module/apmhttp/traceheaders.go | 168 +++ vendor/go.elastic.co/apm/profiling.go | 164 +++ vendor/go.elastic.co/apm/sampler.go | 66 + vendor/go.elastic.co/apm/sanitizer.go | 66 + vendor/go.elastic.co/apm/span.go | 415 ++++++ vendor/go.elastic.co/apm/spancontext.go | 193 +++ 
vendor/go.elastic.co/apm/stacktrace.go | 52 + .../go.elastic.co/apm/stacktrace/context.go | 100 ++ vendor/go.elastic.co/apm/stacktrace/doc.go | 20 + vendor/go.elastic.co/apm/stacktrace/frame.go | 34 + .../apm/stacktrace/generate_library.bash | 77 + .../go.elastic.co/apm/stacktrace/library.go | 253 ++++ .../apm/stacktrace/stacktrace.go | 162 ++ vendor/go.elastic.co/apm/tracecontext.go | 263 ++++ vendor/go.elastic.co/apm/tracer.go | 1170 +++++++++++++++ vendor/go.elastic.co/apm/tracer_stats.go | 52 + vendor/go.elastic.co/apm/transaction.go | 324 ++++ vendor/go.elastic.co/apm/transport/api.go | 33 + vendor/go.elastic.co/apm/transport/default.go | 54 + vendor/go.elastic.co/apm/transport/discard.go | 31 + vendor/go.elastic.co/apm/transport/doc.go | 20 + vendor/go.elastic.co/apm/transport/http.go | 638 ++++++++ .../apm/transport/transporttest/doc.go | 20 + .../apm/transport/transporttest/err.go | 54 + .../apm/transport/transporttest/recorder.go | 203 +++ vendor/go.elastic.co/apm/utils.go | 242 +++ vendor/go.elastic.co/apm/utils_linux.go | 40 + vendor/go.elastic.co/apm/utils_other.go | 38 + vendor/go.elastic.co/apm/version.go | 23 + vendor/go.elastic.co/fastjson/.travis.yml | 9 + vendor/go.elastic.co/fastjson/LICENSE | 23 + vendor/go.elastic.co/fastjson/README.md | 134 ++ vendor/go.elastic.co/fastjson/doc.go | 23 + vendor/go.elastic.co/fastjson/go.mod | 3 + vendor/go.elastic.co/fastjson/go.sum | 2 + vendor/go.elastic.co/fastjson/marshaler.go | 151 ++ vendor/go.elastic.co/fastjson/writer.go | 181 +++ vendor/modules.txt | 35 + 194 files changed, 20792 insertions(+), 61 deletions(-) create mode 100644 libbeat/docs/release-notes/breaking/breaking-7.8.asciidoc create mode 100644 vendor/github.com/armon/go-radix/.gitignore create mode 100644 vendor/github.com/armon/go-radix/.travis.yml create mode 100644 vendor/github.com/armon/go-radix/LICENSE create mode 100644 vendor/github.com/armon/go-radix/README.md create mode 100644 
vendor/github.com/armon/go-radix/go.mod create mode 100644 vendor/github.com/armon/go-radix/radix.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/.travis.yml create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/LICENSE create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/README.md create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/compiler.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/decoders/decoders.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/doc.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/draft4.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/draft6.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/draft7.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/errors.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/formats/formats.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/go.mod create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/go.test.sh create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/loader/loader.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/mediatypes/mediatypes.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/resource.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/schema.go create mode 100644 vendor/github.com/stretchr/testify/suite/doc.go create mode 100644 vendor/github.com/stretchr/testify/suite/interfaces.go create mode 100644 vendor/github.com/stretchr/testify/suite/suite.go create mode 100644 vendor/go.elastic.co/apm/.dockerignore create mode 100644 vendor/go.elastic.co/apm/.gitignore create mode 100644 vendor/go.elastic.co/apm/.jenkins-edge.yml create mode 100644 
vendor/go.elastic.co/apm/.jenkins.yml create mode 100644 vendor/go.elastic.co/apm/CHANGELOG.asciidoc create mode 100644 vendor/go.elastic.co/apm/CHANGELOG.md create mode 100644 vendor/go.elastic.co/apm/CODE_OF_CONDUCT.md create mode 100644 vendor/go.elastic.co/apm/CONTRIBUTING.md create mode 100644 vendor/go.elastic.co/apm/Jenkinsfile create mode 100644 vendor/go.elastic.co/apm/LICENSE create mode 100644 vendor/go.elastic.co/apm/Makefile create mode 100644 vendor/go.elastic.co/apm/NOTICE create mode 100644 vendor/go.elastic.co/apm/README.md create mode 100644 vendor/go.elastic.co/apm/apmconfig/doc.go create mode 100644 vendor/go.elastic.co/apm/apmconfig/watcher.go create mode 100644 vendor/go.elastic.co/apm/apmtest/configwatcher.go create mode 100644 vendor/go.elastic.co/apm/apmtest/discard.go create mode 100644 vendor/go.elastic.co/apm/apmtest/httpsuite.go create mode 100644 vendor/go.elastic.co/apm/apmtest/recorder.go create mode 100644 vendor/go.elastic.co/apm/apmtest/recordlogger.go create mode 100644 vendor/go.elastic.co/apm/apmtest/testlogger.go create mode 100644 vendor/go.elastic.co/apm/apmtest/withtransaction.go create mode 100644 vendor/go.elastic.co/apm/breakdown.go create mode 100644 vendor/go.elastic.co/apm/builtin_metrics.go create mode 100644 vendor/go.elastic.co/apm/capturebody.go create mode 100644 vendor/go.elastic.co/apm/config.go create mode 100644 vendor/go.elastic.co/apm/context.go create mode 100644 vendor/go.elastic.co/apm/doc.go create mode 100644 vendor/go.elastic.co/apm/error.go create mode 100644 vendor/go.elastic.co/apm/error_unix.go create mode 100644 vendor/go.elastic.co/apm/error_windows.go create mode 100644 vendor/go.elastic.co/apm/fmt.go create mode 100644 vendor/go.elastic.co/apm/fnv.go create mode 100644 vendor/go.elastic.co/apm/go.mod create mode 100644 vendor/go.elastic.co/apm/go.sum create mode 100644 vendor/go.elastic.co/apm/gocontext.go create mode 100644 vendor/go.elastic.co/apm/gofuzz.go create mode 100644 
vendor/go.elastic.co/apm/internal/apmcontext/context.go create mode 100644 vendor/go.elastic.co/apm/internal/apmhostutil/container.go create mode 100644 vendor/go.elastic.co/apm/internal/apmhostutil/container_linux.go create mode 100644 vendor/go.elastic.co/apm/internal/apmhostutil/container_nonlinux.go create mode 100644 vendor/go.elastic.co/apm/internal/apmhttputil/forwarded.go create mode 100644 vendor/go.elastic.co/apm/internal/apmhttputil/remoteaddr.go create mode 100644 vendor/go.elastic.co/apm/internal/apmhttputil/url.go create mode 100644 vendor/go.elastic.co/apm/internal/apmlog/logger.go create mode 100644 vendor/go.elastic.co/apm/internal/apmschema/schema.go create mode 100644 vendor/go.elastic.co/apm/internal/apmschema/update.sh create mode 100644 vendor/go.elastic.co/apm/internal/apmstrings/truncate.go create mode 100644 vendor/go.elastic.co/apm/internal/apmversion/version.go create mode 100644 vendor/go.elastic.co/apm/internal/configutil/duration.go create mode 100644 vendor/go.elastic.co/apm/internal/configutil/env.go create mode 100644 vendor/go.elastic.co/apm/internal/configutil/list.go create mode 100644 vendor/go.elastic.co/apm/internal/configutil/size.go create mode 100644 vendor/go.elastic.co/apm/internal/configutil/wildcards.go create mode 100644 vendor/go.elastic.co/apm/internal/iochan/doc.go create mode 100644 vendor/go.elastic.co/apm/internal/iochan/reader.go create mode 100644 vendor/go.elastic.co/apm/internal/pkgerrorsutil/pkgerrors.go create mode 100644 vendor/go.elastic.co/apm/internal/ringbuffer/buffer.go create mode 100644 vendor/go.elastic.co/apm/internal/ringbuffer/doc.go create mode 100644 vendor/go.elastic.co/apm/internal/wildcard/doc.go create mode 100644 vendor/go.elastic.co/apm/internal/wildcard/matcher.go create mode 100644 vendor/go.elastic.co/apm/internal/wildcard/matchers.go create mode 100644 vendor/go.elastic.co/apm/logger.go create mode 100644 vendor/go.elastic.co/apm/metrics.go create mode 100644 
vendor/go.elastic.co/apm/model/doc.go create mode 100644 vendor/go.elastic.co/apm/model/generate.sh create mode 100644 vendor/go.elastic.co/apm/model/gofuzz.go create mode 100644 vendor/go.elastic.co/apm/model/maps.go create mode 100644 vendor/go.elastic.co/apm/model/marshal.go create mode 100644 vendor/go.elastic.co/apm/model/marshal_fastjson.go create mode 100644 vendor/go.elastic.co/apm/model/model.go create mode 100644 vendor/go.elastic.co/apm/modelwriter.go create mode 100644 vendor/go.elastic.co/apm/module/apmelasticsearch/LICENSE create mode 100644 vendor/go.elastic.co/apm/module/apmelasticsearch/client.go create mode 100644 vendor/go.elastic.co/apm/module/apmelasticsearch/doc.go create mode 100644 vendor/go.elastic.co/apm/module/apmelasticsearch/go.mod create mode 100644 vendor/go.elastic.co/apm/module/apmelasticsearch/go.sum create mode 100644 vendor/go.elastic.co/apm/module/apmelasticsearch/requestname.go create mode 100644 vendor/go.elastic.co/apm/module/apmelasticsearch/requestname_go19.go create mode 100644 vendor/go.elastic.co/apm/module/apmhttp/LICENSE create mode 100644 vendor/go.elastic.co/apm/module/apmhttp/client.go create mode 100644 vendor/go.elastic.co/apm/module/apmhttp/context.go create mode 100644 vendor/go.elastic.co/apm/module/apmhttp/doc.go create mode 100644 vendor/go.elastic.co/apm/module/apmhttp/go.mod create mode 100644 vendor/go.elastic.co/apm/module/apmhttp/go.sum create mode 100644 vendor/go.elastic.co/apm/module/apmhttp/handler.go create mode 100644 vendor/go.elastic.co/apm/module/apmhttp/ignorer.go create mode 100644 vendor/go.elastic.co/apm/module/apmhttp/recovery.go create mode 100644 vendor/go.elastic.co/apm/module/apmhttp/requestname.go create mode 100644 vendor/go.elastic.co/apm/module/apmhttp/requestname_go19.go create mode 100644 vendor/go.elastic.co/apm/module/apmhttp/traceheaders.go create mode 100644 vendor/go.elastic.co/apm/profiling.go create mode 100644 vendor/go.elastic.co/apm/sampler.go create mode 100644 
vendor/go.elastic.co/apm/sanitizer.go create mode 100644 vendor/go.elastic.co/apm/span.go create mode 100644 vendor/go.elastic.co/apm/spancontext.go create mode 100644 vendor/go.elastic.co/apm/stacktrace.go create mode 100644 vendor/go.elastic.co/apm/stacktrace/context.go create mode 100644 vendor/go.elastic.co/apm/stacktrace/doc.go create mode 100644 vendor/go.elastic.co/apm/stacktrace/frame.go create mode 100644 vendor/go.elastic.co/apm/stacktrace/generate_library.bash create mode 100644 vendor/go.elastic.co/apm/stacktrace/library.go create mode 100644 vendor/go.elastic.co/apm/stacktrace/stacktrace.go create mode 100644 vendor/go.elastic.co/apm/tracecontext.go create mode 100644 vendor/go.elastic.co/apm/tracer.go create mode 100644 vendor/go.elastic.co/apm/tracer_stats.go create mode 100644 vendor/go.elastic.co/apm/transaction.go create mode 100644 vendor/go.elastic.co/apm/transport/api.go create mode 100644 vendor/go.elastic.co/apm/transport/default.go create mode 100644 vendor/go.elastic.co/apm/transport/discard.go create mode 100644 vendor/go.elastic.co/apm/transport/doc.go create mode 100644 vendor/go.elastic.co/apm/transport/http.go create mode 100644 vendor/go.elastic.co/apm/transport/transporttest/doc.go create mode 100644 vendor/go.elastic.co/apm/transport/transporttest/err.go create mode 100644 vendor/go.elastic.co/apm/transport/transporttest/recorder.go create mode 100644 vendor/go.elastic.co/apm/utils.go create mode 100644 vendor/go.elastic.co/apm/utils_linux.go create mode 100644 vendor/go.elastic.co/apm/utils_other.go create mode 100644 vendor/go.elastic.co/apm/version.go create mode 100644 vendor/go.elastic.co/fastjson/.travis.yml create mode 100644 vendor/go.elastic.co/fastjson/LICENSE create mode 100644 vendor/go.elastic.co/fastjson/README.md create mode 100644 vendor/go.elastic.co/fastjson/doc.go create mode 100644 vendor/go.elastic.co/fastjson/go.mod create mode 100644 vendor/go.elastic.co/fastjson/go.sum create mode 100644 
vendor/go.elastic.co/fastjson/marshaler.go create mode 100644 vendor/go.elastic.co/fastjson/writer.go diff --git a/CHANGELOG-developer.next.asciidoc b/CHANGELOG-developer.next.asciidoc index 812dba7078f..391eb87bfd4 100644 --- a/CHANGELOG-developer.next.asciidoc +++ b/CHANGELOG-developer.next.asciidoc @@ -36,6 +36,7 @@ The list below covers the major changes between 7.0.0-rc2 and master only. - Extract Elasticsearch client logic from `outputs/elasticsearch` package into new `esclientleg` package. {pull}16150[16150] - Rename `queue.BufferConfig.Events` to `queue.BufferConfig.MaxEvents`. {pull}17622[17622] - Remove `queue.Feature` and replace `queue.RegisterType` with `queue.RegisterQueueType`. {pull}17666[17666] +- Introduce APM libbeat instrumentation. `Publish` method on `Client` interface now takes a Context as first argument. {pull}17938[17938] ==== Bugfixes diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 7f65fa3406b..fc1e56be103 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -24,6 +24,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Variable substitution from environment variables is not longer supported. {pull}15937{15937} - Change aws_elb autodiscover provider field name from elb_listener.* to aws.elb.*. {issue}16219[16219] {pull}16402{16402} - Remove `AddDockerMetadata` and `AddKubernetesMetadata` processors from the `script` processor. They can still be used as normal processors in the configuration. {issue}16349[16349] {pull}16514[16514] +- Introduce APM libbeat instrumentation, active when running the beat with ELASTIC_APM_ACTIVE=true. 
{pull}17938[17938] *Auditbeat* diff --git a/NOTICE.txt b/NOTICE.txt index da2d1999032..a8f70e8c42d 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -405,6 +405,33 @@ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------- +Dependency: github.com/armon/go-radix +Version: v1.0.0 +License type (autodetected): MIT +./vendor/github.com/armon/go-radix/LICENSE: +-------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2014 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ -------------------------------------------------------------------- Dependency: github.com/armon/go-socks5 Revision: e75332964ef5 @@ -6886,6 +6913,39 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------- +Dependency: github.com/santhosh-tekuri/jsonschema +Version: v1.2.4 +License type (autodetected): BSD-3-Clause +./vendor/github.com/santhosh-tekuri/jsonschema/LICENSE: +-------------------------------------------------------------------- +Copyright (c) 2017 Santhosh Kumar Tekuri. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------- Dependency: github.com/shirou/gopsutil Version: v2.19.11 @@ -7339,6 +7399,127 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------- +Dependency: go.elastic.co/apm +Version: v1.7.2 +License type (autodetected): Apache-2.0 +./vendor/go.elastic.co/apm/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + +-------NOTICE----- +Elastic APM Go Agent +Copyright 2018-2019 Elasticsearch B.V. + +This product includes software developed at Elasticsearch, B.V. (https://www.elastic.co/). + +========================================= +Third party code included by the Go Agent +========================================= + +------------------------------------------------------------------------------------ +This project copies code from the Go standard library (https://github.com/golang/go) +------------------------------------------------------------------------------------ + +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------- +This project copies code from Gorilla Mux (https://github.com/gorilla/mux) +-------------------------------------------------------------------------- + +Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +------------------------------------------------------------ +This project copies code from pq (https://github.com/lib/pq) +------------------------------------------------------------ + +Copyright (c) 2011-2013, 'pq' Contributors Portions Copyright (C) 2011 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +-------------------------------------------------------------------- +Dependency: go.elastic.co/apm/module/apmelasticsearch +Version: v1.7.2 +License type (autodetected): Apache-2.0 +./vendor/go.elastic.co/apm/module/apmelasticsearch/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + +-------------------------------------------------------------------- +Dependency: go.elastic.co/apm/module/apmhttp +Version: v1.7.2 +License type (autodetected): Apache-2.0 +./vendor/go.elastic.co/apm/module/apmhttp/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + +-------------------------------------------------------------------- +Dependency: go.elastic.co/fastjson +Version: v1.0.0 +License type (autodetected): Apache-2.0 +./vendor/go.elastic.co/fastjson/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + -------------------------------------------------------------------- Dependency: go.opencensus.io Version: v0.22.2 diff --git a/go.mod b/go.mod index d13d920276d..c1e20b023ce 100644 --- a/go.mod +++ b/go.mod @@ -141,6 +141,9 @@ require ( github.com/urso/ecslog v0.0.1 github.com/vmware/govmomi v0.0.0-20170802214208-2cad15190b41 github.com/yuin/gopher-lua v0.0.0-20170403160031-b402f3114ec7 // indirect + go.elastic.co/apm v1.7.2 + go.elastic.co/apm/module/apmelasticsearch v1.7.2 + go.elastic.co/apm/module/apmhttp v1.7.2 go.uber.org/atomic v1.3.1 go.uber.org/multierr v1.1.1-0.20170829224307-fb7d312c2c04 go.uber.org/zap v1.7.1 diff --git a/go.sum b/go.sum index 4483c5af48d..420419809b5 100644 --- a/go.sum +++ b/go.sum @@ -113,6 +113,8 @@ github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYU github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antlr/antlr4 
v0.0.0-20200326173327-a4c66dc863bb h1:jGlQhNsk+RHK5IFSvL5wUi+ed8HZPBR2ovtGyahWOcU= github.com/antlr/antlr4 v0.0.0-20200326173327-a4c66dc863bb/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-lambda-go v1.6.0 h1:T+u/g79zPKw1oJM7xYhvpq7i4Sjc0iVsXZUaqRVVSOg= @@ -173,6 +175,8 @@ github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+ github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea h1:n2Ltr3SrfQlf/9nOna1DoGKxLx3qTSI8Ttl6Xrqp6mw= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cucumber/godog v0.8.1 h1:lVb+X41I4YDreE+ibZ50bdXmySxgRviYFgKY6Aw4XE8= +github.com/cucumber/godog v0.8.1/go.mod h1:vSh3r/lM+psC1BPXvdkSEuNjmXfpVqrMGYAElF6hxnA= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -240,6 +244,7 @@ github.com/elastic/go-seccomp-bpf v1.1.0 h1:jUzzDc6LyCtdolZdvL/26dad6rZ9vsc7xZ2e github.com/elastic/go-seccomp-bpf v1.1.0/go.mod h1:l+89Vy5BzjVcaX8USZRMOwmwwDScE+vxCFzzvQwN7T8= github.com/elastic/go-structform v0.0.6 h1:wqeK4LwD2NNDOoRGTImE24S6pkCDVr8+oUSIkmChzLk= 
github.com/elastic/go-structform v0.0.6/go.mod h1:QrMyP3oM9Sjk92EVGLgRaL2lKt0Qx7ZNDRWDxB6khVs= +github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-sysinfo v1.3.0 h1:eb2XFGTMlSwG/yyU9Y8jVAYLIzU2sFzWXwo2gmetyrE= github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-txfile v0.0.7 h1:Yn28gclW7X0Qy09nSMSsx0uOAvAGMsp6XHydbiLVe2s= @@ -596,6 +601,8 @@ github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b/go.mod h1:8458kAa github.com/sanathkr/yaml v0.0.0-20170819201035-0056894fa522/go.mod h1:tQTYKOQgxoH3v6dEmdHiz4JG+nbxWwM5fgPQUpSZqVQ= github.com/sanathkr/yaml v1.0.1-0.20170819201035-0056894fa522 h1:39BJIaZIhIBmXATIhdlTBlTQpAiGXHnz17CrO7vF2Ss= github.com/sanathkr/yaml v1.0.1-0.20170819201035-0056894fa522/go.mod h1:tQTYKOQgxoH3v6dEmdHiz4JG+nbxWwM5fgPQUpSZqVQ= +github.com/santhosh-tekuri/jsonschema v1.2.4 h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis= +github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.19.11+incompatible h1:lJHR0foqAjI4exXqWsU3DbH7bX1xvdhGdnXTIARA9W4= github.com/shirou/gopsutil v2.19.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= @@ -669,6 +676,14 @@ github.com/xeipuuv/gojsonschema v0.0.0-20181112162635-ac52e6811b56 h1:yhqBHs09Sm github.com/xeipuuv/gojsonschema v0.0.0-20181112162635-ac52e6811b56/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/yuin/gopher-lua v0.0.0-20170403160031-b402f3114ec7 h1:0gYLpmzecnaDCoeWxSfEJ7J1b6B/67+NV++4HKQXx+Y= github.com/yuin/gopher-lua v0.0.0-20170403160031-b402f3114ec7/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= +go.elastic.co/apm v1.7.2 
h1:0nwzVIPp4PDBXSYYtN19+1W5V+sj+C25UjqxDVoKcA8= +go.elastic.co/apm v1.7.2/go.mod h1:tCw6CkOJgkWnzEthFN9HUP1uL3Gjc/Ur6m7gRPLaoH0= +go.elastic.co/apm/module/apmelasticsearch v1.7.2 h1:5STGHLZLSeAzxordMc+dFVKiyVtMmxADOV+TgRaXXJg= +go.elastic.co/apm/module/apmelasticsearch v1.7.2/go.mod h1:ZyNFuyWdt42GBZkz0SogoLzDBrBGj4orxpiUuxYeYq8= +go.elastic.co/apm/module/apmhttp v1.7.2 h1:2mRh7SwBuEVLmJlX+hsMdcSg9xaielCLElaPn/+i34w= +go.elastic.co/apm/module/apmhttp v1.7.2/go.mod h1:sTFWiWejnhSdZv6+dMgxGec2Nxe/ZKfHfz/xtRM+cRY= +go.elastic.co/fastjson v1.0.0 h1:ooXV/ABvf+tBul26jcVViPT3sBir0PvXgibYB1IQQzg= +go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index b355e68f9ac..1863c3aa0ab 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -33,6 +33,8 @@ import ( "strings" "time" + "go.elastic.co/apm" + "github.com/gofrs/uuid" errw "github.com/pkg/errors" "go.uber.org/zap" @@ -122,6 +124,8 @@ var debugf = logp.MakeDebug("beat") func init() { initRand() + // we need to close the default tracer to prevent the beat sending events to localhost:8200 + apm.DefaultTracer.Close() } // initRand initializes the runtime random number generator seed using @@ -332,11 +336,17 @@ func (b *Beat) createBeater(bt beat.Creator) (beat.Beater, error) { } } + tracer, err := apm.NewTracer(b.Info.Beat, b.Info.Version) + if err != nil { + return nil, err + } + pipeline, err := pipeline.Load(b.Info, pipeline.Monitors{ Metrics: reg, Telemetry: monitoring.GetNamespace("state").GetRegistry(), Logger: logp.L().Named("publisher"), + Tracer: tracer, }, b.Config.Pipeline, b.processing, diff --git 
a/libbeat/docs/release-notes/breaking/breaking-7.8.asciidoc b/libbeat/docs/release-notes/breaking/breaking-7.8.asciidoc new file mode 100644 index 00000000000..c94a45b7603 --- /dev/null +++ b/libbeat/docs/release-notes/breaking/breaking-7.8.asciidoc @@ -0,0 +1,25 @@ +[[breaking-changes-7.8]] + +=== Breaking changes in 7.8 +++++ +7.8 +++++ + +{see-relnotes} + +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] +[float] + +==== APM Instrumentation + +Libbeat includes the Elastic APM Agent for instrumenting the publishing pipeline. +Currently the Elasticsearch output is instrumented. APM can be enabled simply by +setting the `ELASTIC_APM_ACTIVE` environment variable to `true` when starting the beat. +To make tracing possible, the `Publish` method of the `Client` interface takes a +`Context` object as its first argument. That `Context` is intended for propagating +request-scoped values, not for cancellation. + +// end::notable-breaking-changes[] diff --git a/libbeat/docs/release-notes/breaking/breaking.asciidoc b/libbeat/docs/release-notes/breaking/breaking.asciidoc index 51e58e6ba9d..5bd5d2ad03d 100644 --- a/libbeat/docs/release-notes/breaking/breaking.asciidoc +++ b/libbeat/docs/release-notes/breaking/breaking.asciidoc @@ -10,6 +10,7 @@ changes, but there are breaking changes between major versions (e.g. 6.x to 7.x) is not recommended.
See the following topics for a description of breaking changes: +* <> * <> @@ -27,6 +28,8 @@ See the following topics for a description of breaking changes: * <> +include::breaking-7.8.asciidoc[] + include::breaking-7.7.asciidoc[] include::breaking-7.6.asciidoc[] diff --git a/libbeat/esleg/eslegclient/bulkapi.go b/libbeat/esleg/eslegclient/bulkapi.go index 86b518eeea1..ae7ea92f8ba 100644 --- a/libbeat/esleg/eslegclient/bulkapi.go +++ b/libbeat/esleg/eslegclient/bulkapi.go @@ -19,6 +19,7 @@ package eslegclient import ( "bytes" + "context" "encoding/json" "errors" "io" @@ -26,6 +27,9 @@ import ( "net/http" "strings" + "go.elastic.co/apm" + "go.elastic.co/apm/module/apmhttp" + "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" ) @@ -59,6 +63,7 @@ type BulkResult json.RawMessage // Bulk performs many index/delete operations in a single API call. // Implements: http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html func (conn *Connection) Bulk( + ctx context.Context, index, docType string, params map[string]string, body []interface{}, ) (int, BulkResult, error) { @@ -69,13 +74,16 @@ func (conn *Connection) Bulk( enc := conn.Encoder enc.Reset() if err := bulkEncode(conn.log, enc, body); err != nil { + apm.CaptureError(ctx, err).Send() return 0, nil, err } requ, err := newBulkRequest(conn.URL, index, docType, params, enc) if err != nil { + apm.CaptureError(ctx, err).Send() return 0, nil, err } + requ.requ = apmhttp.RequestWithContext(ctx, requ.requ) return conn.sendBulkRequest(requ) } diff --git a/libbeat/esleg/eslegclient/bulkapi_integration_test.go b/libbeat/esleg/eslegclient/bulkapi_integration_test.go index ec7ba4a1c4d..0edce1afbe3 100644 --- a/libbeat/esleg/eslegclient/bulkapi_integration_test.go +++ b/libbeat/esleg/eslegclient/bulkapi_integration_test.go @@ -20,6 +20,7 @@ package eslegclient import ( + "context" "fmt" "os" "testing" @@ -54,7 +55,7 @@ func TestBulk(t *testing.T) { params := 
map[string]string{ "refresh": "true", } - _, _, err := client.Bulk(index, "type1", params, body) + _, _, err := client.Bulk(context.Background(), index, "type1", params, body) if err != nil { t.Fatalf("Bulk() returned error: %s", err) } @@ -87,7 +88,7 @@ func TestEmptyBulk(t *testing.T) { params := map[string]string{ "refresh": "true", } - _, resp, err := client.Bulk(index, "type1", params, body) + _, resp, err := client.Bulk(context.Background(), index, "type1", params, body) if err != nil { t.Fatalf("Bulk() returned error: %s", err) } @@ -155,7 +156,7 @@ func TestBulkMoreOperations(t *testing.T) { params := map[string]string{ "refresh": "true", } - _, resp, err := client.Bulk(index, "type1", params, body) + _, resp, err := client.Bulk(context.Background(), index, "type1", params, body) if err != nil { t.Fatalf("Bulk() returned error: %s [%s]", err, resp) } diff --git a/libbeat/esleg/eslegclient/bulkapi_mock_test.go b/libbeat/esleg/eslegclient/bulkapi_mock_test.go index 1fbd53d9425..ded3e53c95c 100644 --- a/libbeat/esleg/eslegclient/bulkapi_mock_test.go +++ b/libbeat/esleg/eslegclient/bulkapi_mock_test.go @@ -20,6 +20,7 @@ package eslegclient import ( + "context" "fmt" "net/http" "os" @@ -60,7 +61,7 @@ func TestOneHostSuccessResp_Bulk(t *testing.T) { params := map[string]string{ "refresh": "true", } - _, _, err := client.Bulk(index, "type1", params, body) + _, _, err := client.Bulk(context.Background(), index, "type1", params, body) if err != nil { t.Errorf("Bulk() returns error: %s", err) } @@ -96,7 +97,7 @@ func TestOneHost500Resp_Bulk(t *testing.T) { params := map[string]string{ "refresh": "true", } - _, _, err := client.Bulk(index, "type1", params, body) + _, _, err := client.Bulk(context.Background(), index, "type1", params, body) if err == nil { t.Errorf("Bulk() should return error.") } @@ -136,7 +137,7 @@ func TestOneHost503Resp_Bulk(t *testing.T) { params := map[string]string{ "refresh": "true", } - _, _, err := client.Bulk(index, "type1", params, body) + 
_, _, err := client.Bulk(context.Background(), index, "type1", params, body) if err == nil { t.Errorf("Bulk() should return error.") } diff --git a/libbeat/esleg/eslegclient/connection.go b/libbeat/esleg/eslegclient/connection.go index 7001d2e453d..138c9ab3c83 100644 --- a/libbeat/esleg/eslegclient/connection.go +++ b/libbeat/esleg/eslegclient/connection.go @@ -26,6 +26,8 @@ import ( "net/url" "time" + "go.elastic.co/apm/module/apmelasticsearch" + "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/transport" "github.com/elastic/beats/v7/libbeat/common/transport/kerberos" @@ -128,14 +130,16 @@ func NewConnection(s ConnectionSettings) (*Connection, error) { } var httpClient esHTTPClient + // when dropping the legacy client in favour of the official Go client, it should be instrumented + // eg, like in https://github.com/elastic/apm-server/blob/7.7/elasticsearch/client.go httpClient = &http.Client{ - Transport: &http.Transport{ + Transport: apmelasticsearch.WrapRoundTripper(&http.Transport{ Dial: dialer.Dial, DialTLS: tlsDialer.Dial, TLSClientConfig: s.TLS.ToConfig(), Proxy: proxy, IdleConnTimeout: s.IdleConnTimeout, - }, + }), Timeout: s.Timeout, } diff --git a/libbeat/monitoring/report/elasticsearch/client.go b/libbeat/monitoring/report/elasticsearch/client.go index 9e8469ab547..fb83a2e636b 100644 --- a/libbeat/monitoring/report/elasticsearch/client.go +++ b/libbeat/monitoring/report/elasticsearch/client.go @@ -18,11 +18,14 @@ package elasticsearch import ( + "context" "encoding/json" "fmt" "net/http" "time" + "go.elastic.co/apm" + "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/common" @@ -103,7 +106,7 @@ func (c *publishClient) Close() error { return c.es.Close() } -func (c *publishClient) Publish(batch publisher.Batch) error { +func (c *publishClient) Publish(ctx context.Context, batch publisher.Batch) error { events := batch.Events() var failed []publisher.Event 
var reason error @@ -141,7 +144,7 @@ func (c *publishClient) Publish(batch publisher.Batch) error { case report.FormatXPackMonitoringBulk: err = c.publishXPackBulk(params, event, typ) case report.FormatBulk: - err = c.publishBulk(event, typ) + err = c.publishBulk(ctx, event, typ) } if err != nil { @@ -186,7 +189,7 @@ func (c *publishClient) publishXPackBulk(params map[string]string, event publish return err } -func (c *publishClient) publishBulk(event publisher.Event, typ string) error { +func (c *publishClient) publishBulk(ctx context.Context, event publisher.Event, typ string) error { meta := common.MapStr{ "_index": getMonitoringIndexName(), "_routing": nil, @@ -233,8 +236,9 @@ func (c *publishClient) publishBulk(event publisher.Event, typ string) error { // Currently one request per event is sent. Reason is that each event can contain different // interval params and X-Pack requires to send the interval param. - _, result, err := c.es.Bulk(getMonitoringIndexName(), "", nil, bulk[:]) + _, result, err := c.es.Bulk(ctx, getMonitoringIndexName(), "", nil, bulk[:]) if err != nil { + apm.CaptureError(ctx, fmt.Errorf("failed to perform any bulk index operations: %w", err)).Send() return err } diff --git a/libbeat/outputs/backoff.go b/libbeat/outputs/backoff.go index 256b8029b09..5c1ece2e5db 100644 --- a/libbeat/outputs/backoff.go +++ b/libbeat/outputs/backoff.go @@ -18,6 +18,7 @@ package outputs import ( + "context" "errors" "time" @@ -56,8 +57,8 @@ func (b *backoffClient) Close() error { return err } -func (b *backoffClient) Publish(batch publisher.Batch) error { - err := b.client.Publish(batch) +func (b *backoffClient) Publish(ctx context.Context, batch publisher.Batch) error { + err := b.client.Publish(ctx, batch) if err != nil { b.client.Close() } diff --git a/libbeat/outputs/console/console.go b/libbeat/outputs/console/console.go index 79aee6957d6..bbce8f449a9 100644 --- a/libbeat/outputs/console/console.go +++ b/libbeat/outputs/console/console.go @@ -19,6 +19,7 
@@ package console import ( "bufio" + "context" "fmt" "os" "runtime" @@ -102,7 +103,7 @@ func newConsole(index string, observer outputs.Observer, codec codec.Codec) (*co } func (c *console) Close() error { return nil } -func (c *console) Publish(batch publisher.Batch) error { +func (c *console) Publish(_ context.Context, batch publisher.Batch) error { st := c.observer events := batch.Events() st.NewBatch(len(events)) diff --git a/libbeat/outputs/console/console_test.go b/libbeat/outputs/console/console_test.go index 29201beee54..a8e85601a89 100644 --- a/libbeat/outputs/console/console_test.go +++ b/libbeat/outputs/console/console_test.go @@ -21,6 +21,7 @@ package console import ( "bytes" + "context" "io" "os" "testing" @@ -130,7 +131,7 @@ func run(codec codec.Codec, batches ...publisher.Batch) (string, error) { return withStdout(func() { c, _ := newConsole("test", outputs.NewNilObserver(), codec) for _, b := range batches { - c.Publish(b) + c.Publish(context.Background(), b) } }) } diff --git a/libbeat/outputs/elasticsearch/client.go b/libbeat/outputs/elasticsearch/client.go index 096cf4af060..f341be53fe7 100644 --- a/libbeat/outputs/elasticsearch/client.go +++ b/libbeat/outputs/elasticsearch/client.go @@ -18,12 +18,15 @@ package elasticsearch import ( + "context" "encoding/base64" "errors" "fmt" "net/http" "time" + "go.elastic.co/apm" + "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/esleg/eslegclient" @@ -170,9 +173,9 @@ func (client *Client) Clone() *Client { return c } -func (client *Client) Publish(batch publisher.Batch) error { +func (client *Client) Publish(ctx context.Context, batch publisher.Batch) error { events := batch.Events() - rest, err := client.publishEvents(events) + rest, err := client.publishEvents(ctx, events) if len(rest) == 0 { batch.ACK() } else { @@ -184,9 +187,9 @@ func (client *Client) Publish(batch publisher.Batch) error { // 
PublishEvents sends all events to elasticsearch. On error a slice with all // events not published or confirmed to be processed by elasticsearch will be // returned. The input slice backing memory will be reused by return the value. -func (client *Client) publishEvents( - data []publisher.Event, -) ([]publisher.Event, error) { +func (client *Client) publishEvents(ctx context.Context, data []publisher.Event) ([]publisher.Event, error) { + span, ctx := apm.StartSpan(ctx, "publishEvents", "output") + defer span.End() begin := time.Now() st := client.observer @@ -201,8 +204,10 @@ func (client *Client) publishEvents( // encode events into bulk request buffer, dropping failed elements from // events slice origCount := len(data) + span.Context.SetLabel("events_original", origCount) data, bulkItems := bulkEncodePublishRequest(client.log, client.conn.GetVersion(), client.index, client.pipeline, data) newCount := len(data) + span.Context.SetLabel("events_encoded", newCount) if st != nil && origCount > newCount { st.Dropped(origCount - newCount) } @@ -210,14 +215,18 @@ func (client *Client) publishEvents( return nil, nil } - status, result, sendErr := client.conn.Bulk("", "", nil, bulkItems) + status, result, sendErr := client.conn.Bulk(ctx, "", "", nil, bulkItems) if sendErr != nil { - client.log.Errorf("Failed to perform any bulk index operations: %s", sendErr) + err := apm.CaptureError(ctx, fmt.Errorf("failed to perform any bulk index operations: %w", sendErr)) + err.Send() + client.log.Error(err) return data, sendErr } + pubCount := len(data) + span.Context.SetLabel("events_published", pubCount) client.log.Debugf("PublishEvents: %d events have been published to elasticsearch in %v.", - len(data), + pubCount, time.Now().Sub(begin)) // check response for transient errors @@ -231,6 +240,7 @@ func (client *Client) publishEvents( } failed := len(failedEvents) + span.Context.SetLabel("events_failed", failed) if st := client.observer; st != nil { dropped := stats.nonIndexable 
duplicates := stats.duplicates diff --git a/libbeat/outputs/elasticsearch/client_integration_test.go b/libbeat/outputs/elasticsearch/client_integration_test.go index 009b1edd833..9abbbe39873 100644 --- a/libbeat/outputs/elasticsearch/client_integration_test.go +++ b/libbeat/outputs/elasticsearch/client_integration_test.go @@ -30,6 +30,8 @@ import ( "testing" "time" + "go.elastic.co/apm/apmtest" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -89,7 +91,7 @@ func testPublishEvent(t *testing.T, index string, cfg map[string]interface{}) { }, }) - err := output.Publish(batch) + err := output.Publish(context.Background(), batch) if err != nil { t.Fatal(err) } @@ -127,7 +129,7 @@ func TestClientPublishEventWithPipeline(t *testing.T) { } publish := func(event beat.Event) { - err := output.Publish(outest.NewBatch(event)) + err := output.Publish(context.Background(), outest.NewBatch(event)) if err != nil { t.Fatal(err) } @@ -208,7 +210,7 @@ func TestClientBulkPublishEventsWithPipeline(t *testing.T) { } publish := func(events ...beat.Event) { - err := output.Publish(outest.NewBatch(events...)) + err := output.Publish(context.Background(), outest.NewBatch(events...)) if err != nil { t.Fatal(err) } @@ -272,6 +274,46 @@ func TestClientBulkPublishEventsWithPipeline(t *testing.T) { assert.Equal(t, 1, getCount("testfield:0")) // no pipeline } +func TestClientPublishTracer(t *testing.T) { + index := "beat-apm-tracer-test" + output, client := connectTestEs(t, map[string]interface{}{ + "index": index, + }) + + client.conn.Delete(index, "", "", nil) + + batch := outest.NewBatch(beat.Event{ + Timestamp: time.Now(), + Fields: common.MapStr{ + "message": "Hello world", + }, + }) + + tx, spans, _ := apmtest.WithTransaction(func(ctx context.Context) { + err := output.Publish(ctx, batch) + if err != nil { + t.Fatal(err) + } + }) + require.Len(t, spans, 2) + + // get spans in reverse order + firstSpan := spans[1] + + assert.Equal(t, 
"publishEvents", firstSpan.Name) + assert.Equal(t, "output", firstSpan.Type) + assert.Equal(t, [8]byte(firstSpan.TransactionID), [8]byte(tx.ID)) + assert.True(t, len(firstSpan.Context.Tags) > 0, "no tags found") + + secondSpan := spans[0] + assert.Contains(t, secondSpan.Name, "POST") + assert.Equal(t, "db", secondSpan.Type) + assert.Equal(t, "elasticsearch", secondSpan.Subtype) + assert.Equal(t, [8]byte(secondSpan.ParentID), [8]byte(firstSpan.ID)) + assert.Equal(t, [8]byte(secondSpan.TransactionID), [8]byte(tx.ID)) + assert.Equal(t, "/_bulk", secondSpan.Context.HTTP.URL.Path) +} + func connectTestEs(t *testing.T, cfg interface{}) (outputs.Client, *Client) { config, err := common.NewConfigFrom(map[string]interface{}{ "hosts": eslegtest.GetEsHost(), diff --git a/libbeat/outputs/elasticsearch/client_test.go b/libbeat/outputs/elasticsearch/client_test.go index d69849dabab..5219d052987 100644 --- a/libbeat/outputs/elasticsearch/client_test.go +++ b/libbeat/outputs/elasticsearch/client_test.go @@ -20,6 +20,7 @@ package elasticsearch import ( + "context" "fmt" "net/http" "net/http/httptest" @@ -242,7 +243,7 @@ func TestClientWithHeaders(t *testing.T) { }} batch := outest.NewBatch(event, event, event) - err = client.Publish(batch) + err = client.Publish(context.Background(), batch) assert.NoError(t, err) assert.Equal(t, 2, requestCount) } diff --git a/libbeat/outputs/failover.go b/libbeat/outputs/failover.go index b388a58a61f..f64720a7895 100644 --- a/libbeat/outputs/failover.go +++ b/libbeat/outputs/failover.go @@ -18,6 +18,7 @@ package outputs import ( + "context" "errors" "fmt" "math/rand" @@ -91,12 +92,12 @@ func (f *failoverClient) Close() error { return f.clients[f.active].Close() } -func (f *failoverClient) Publish(batch publisher.Batch) error { +func (f *failoverClient) Publish(ctx context.Context, batch publisher.Batch) error { if f.active < 0 { batch.Retry() return errNoActiveConnection } - return f.clients[f.active].Publish(batch) + return 
f.clients[f.active].Publish(ctx, batch) } func (f *failoverClient) Test(d testing.Driver) { diff --git a/libbeat/outputs/fileout/file.go b/libbeat/outputs/fileout/file.go index c3f5d3c5e4e..2c2f5216294 100644 --- a/libbeat/outputs/fileout/file.go +++ b/libbeat/outputs/fileout/file.go @@ -18,6 +18,7 @@ package fileout import ( + "context" "os" "path/filepath" @@ -109,9 +110,7 @@ func (out *fileOutput) Close() error { return out.rotator.Close() } -func (out *fileOutput) Publish( - batch publisher.Batch, -) error { +func (out *fileOutput) Publish(_ context.Context, batch publisher.Batch) error { defer batch.ACK() st := out.observer diff --git a/libbeat/outputs/kafka/client.go b/libbeat/outputs/kafka/client.go index 6925d93aa51..6080783192e 100644 --- a/libbeat/outputs/kafka/client.go +++ b/libbeat/outputs/kafka/client.go @@ -18,6 +18,7 @@ package kafka import ( + "context" "errors" "fmt" "strings" @@ -126,7 +127,7 @@ func (c *client) Close() error { return nil } -func (c *client) Publish(batch publisher.Batch) error { +func (c *client) Publish(_ context.Context, batch publisher.Batch) error { events := batch.Events() c.observer.NewBatch(len(events)) diff --git a/libbeat/outputs/kafka/kafka_integration_test.go b/libbeat/outputs/kafka/kafka_integration_test.go index 58d03d1c1e7..af46aa65c73 100644 --- a/libbeat/outputs/kafka/kafka_integration_test.go +++ b/libbeat/outputs/kafka/kafka_integration_test.go @@ -20,6 +20,7 @@ package kafka import ( + "context" "encoding/json" "fmt" "math/rand" @@ -220,7 +221,7 @@ func TestKafkaPublish(t *testing.T) { } wg.Add(1) - output.Publish(batch) + output.Publish(context.Background(), batch) } // wait for all published batches to be ACKed diff --git a/libbeat/outputs/logstash/async.go b/libbeat/outputs/logstash/async.go index bcbbbdbc428..f196d137b88 100644 --- a/libbeat/outputs/logstash/async.go +++ b/libbeat/outputs/logstash/async.go @@ -18,6 +18,7 @@ package logstash import ( + "context" "errors" "net" "sync" @@ -134,7 +135,7 @@ 
func (c *asyncClient) Close() error { return c.Client.Close() } -func (c *asyncClient) Publish(batch publisher.Batch) error { +func (c *asyncClient) Publish(_ context.Context, batch publisher.Batch) error { st := c.observer events := batch.Events() st.NewBatch(len(events)) diff --git a/libbeat/outputs/logstash/async_test.go b/libbeat/outputs/logstash/async_test.go index 5e6e416a0b4..04d97d8c40b 100644 --- a/libbeat/outputs/logstash/async_test.go +++ b/libbeat/outputs/logstash/async_test.go @@ -20,6 +20,7 @@ package logstash import ( + "context" "sync" "testing" "time" @@ -85,7 +86,7 @@ func newAsyncTestDriver(client outputs.NetworkClient) *testAsyncDriver { case driverCmdClose: driver.client.Close() case driverCmdPublish: - err := driver.client.Publish(cmd.batch) + err := driver.client.Publish(context.Background(), cmd.batch) driver.returns = append(driver.returns, testClientReturn{cmd.batch, err}) } } diff --git a/libbeat/outputs/logstash/logstash_integration_test.go b/libbeat/outputs/logstash/logstash_integration_test.go index 6dfebbabcec..0c744e470cb 100644 --- a/libbeat/outputs/logstash/logstash_integration_test.go +++ b/libbeat/outputs/logstash/logstash_integration_test.go @@ -20,6 +20,7 @@ package logstash import ( + "context" "encoding/json" "fmt" "os" @@ -301,7 +302,7 @@ func testSendMessageViaLogstash(t *testing.T, name string, tls bool) { }, }, ) - ls.Publish(batch) + ls.Publish(context.Background(), batch) // wait for logstash event flush + elasticsearch waitUntilTrue(5*time.Second, checkIndex(ls, 1)) @@ -556,7 +557,7 @@ func checkEvent(t *testing.T, ls, es map[string]interface{}) { } func (t *testOutputer) PublishEvent(event beat.Event) { - t.Publish(outest.NewBatch(event)) + t.Publish(context.Background(), outest.NewBatch(event)) } func (t *testOutputer) BulkPublish(events []beat.Event) bool { @@ -570,7 +571,7 @@ func (t *testOutputer) BulkPublish(events []beat.Event) bool { wg.Done() } - t.Publish(batch) + t.Publish(context.Background(), batch) 
wg.Wait() return ok } diff --git a/libbeat/outputs/logstash/logstash_test.go b/libbeat/outputs/logstash/logstash_test.go index 06d15567ec5..7b8adeb8f43 100644 --- a/libbeat/outputs/logstash/logstash_test.go +++ b/libbeat/outputs/logstash/logstash_test.go @@ -18,6 +18,7 @@ package logstash import ( + "context" "fmt" "os" "testing" @@ -126,7 +127,7 @@ func testConnectionType( batch.OnSignal = func(_ outest.BatchSignal) { close(sig) } - err = output.Publish(batch) + err = output.Publish(context.Background(), batch) t.Log("wait signal") <-sig diff --git a/libbeat/outputs/logstash/sync.go b/libbeat/outputs/logstash/sync.go index d13740d37f8..22e133db906 100644 --- a/libbeat/outputs/logstash/sync.go +++ b/libbeat/outputs/logstash/sync.go @@ -18,6 +18,7 @@ package logstash import ( + "context" "time" "github.com/elastic/beats/v7/libbeat/beat" @@ -101,7 +102,7 @@ func (c *syncClient) reconnect() error { return c.Client.Connect() } -func (c *syncClient) Publish(batch publisher.Batch) error { +func (c *syncClient) Publish(_ context.Context, batch publisher.Batch) error { events := batch.Events() st := c.observer diff --git a/libbeat/outputs/logstash/sync_test.go b/libbeat/outputs/logstash/sync_test.go index af90cfa130d..3ba9e682232 100644 --- a/libbeat/outputs/logstash/sync_test.go +++ b/libbeat/outputs/logstash/sync_test.go @@ -20,6 +20,7 @@ package logstash import ( + "context" "sync" "testing" "time" @@ -99,7 +100,7 @@ func newClientTestDriver(client outputs.NetworkClient) *testSyncDriver { case driverCmdClose: driver.client.Close() case driverCmdPublish: - err := driver.client.Publish(cmd.batch) + err := driver.client.Publish(context.Background(), cmd.batch) driver.returns = append(driver.returns, testClientReturn{cmd.batch, err}) } } diff --git a/libbeat/outputs/outputs.go b/libbeat/outputs/outputs.go index c6808321ce7..0fdf4d9407b 100644 --- a/libbeat/outputs/outputs.go +++ b/libbeat/outputs/outputs.go @@ -21,6 +21,8 @@ package outputs import ( + "context" + 
"github.com/elastic/beats/v7/libbeat/publisher" ) @@ -34,7 +36,8 @@ type Client interface { // Using Retry/Cancelled a client can return a batch of unprocessed events to // the publisher pipeline. The publisher pipeline (if configured by the output // factory) will take care of retrying/dropping events. - Publish(publisher.Batch) error + // Context is intended for carrying request-scoped values, not for cancellation. + Publish(context.Context, publisher.Batch) error // String identifies the client type and endpoint. String() string diff --git a/libbeat/outputs/redis/backoff.go b/libbeat/outputs/redis/backoff.go index 30107df90fa..41f448ca318 100644 --- a/libbeat/outputs/redis/backoff.go +++ b/libbeat/outputs/redis/backoff.go @@ -18,6 +18,7 @@ package redis import ( + "context" "time" "github.com/garyburd/redigo/redis" @@ -78,8 +79,8 @@ func (b *backoffClient) Close() error { return err } -func (b *backoffClient) Publish(batch publisher.Batch) error { - err := b.client.Publish(batch) +func (b *backoffClient) Publish(ctx context.Context, batch publisher.Batch) error { + err := b.client.Publish(ctx, batch) if err != nil { b.client.Close() b.updateFailReason(err) diff --git a/libbeat/outputs/redis/client.go b/libbeat/outputs/redis/client.go index fbaa40f4d3e..70e316cba3f 100644 --- a/libbeat/outputs/redis/client.go +++ b/libbeat/outputs/redis/client.go @@ -18,6 +18,7 @@ package redis import ( + "context" "errors" "regexp" "strconv" @@ -134,7 +135,7 @@ func (c *client) Close() error { return c.Client.Close() } -func (c *client) Publish(batch publisher.Batch) error { +func (c *client) Publish(_ context.Context, batch publisher.Batch) error { if c == nil { panic("no client") } diff --git a/libbeat/outputs/redis/redis_integration_test.go b/libbeat/outputs/redis/redis_integration_test.go index 66c3375246a..25189fa9008 100644 --- a/libbeat/outputs/redis/redis_integration_test.go +++ b/libbeat/outputs/redis/redis_integration_test.go @@ -20,6 +20,7 @@ package 
redis import ( + "context" "encoding/json" "fmt" "os" @@ -348,7 +349,7 @@ func sendTestEvents(out outputs.Client, batches, N int) error { } batch := outest.NewBatch(events...) - err := out.Publish(batch) + err := out.Publish(context.Background(), batch) if err != nil { return err } diff --git a/libbeat/publisher/pipeline/controller.go b/libbeat/publisher/pipeline/controller.go index 837a70eab77..f703f28f685 100644 --- a/libbeat/publisher/pipeline/controller.go +++ b/libbeat/publisher/pipeline/controller.go @@ -105,7 +105,7 @@ func (c *outputController) Set(outGrp outputs.Group) { clients := outGrp.Clients worker := make([]outputWorker, len(clients)) for i, client := range clients { - worker[i] = makeClientWorker(c.observer, c.workQueue, client) + worker[i] = makeClientWorker(c.observer, c.workQueue, client, c.monitors.Tracer) } grp := &outputGroup{ workQueue: c.workQueue, diff --git a/libbeat/publisher/pipeline/module.go b/libbeat/publisher/pipeline/module.go index b965f6f2552..4519cd382ae 100644 --- a/libbeat/publisher/pipeline/module.go +++ b/libbeat/publisher/pipeline/module.go @@ -21,6 +21,8 @@ import ( "flag" "fmt" + "go.elastic.co/apm" + "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" @@ -43,6 +45,7 @@ type Monitors struct { Metrics *monitoring.Registry Telemetry *monitoring.Registry Logger *logp.Logger + Tracer *apm.Tracer } // OutputFactory is used by the publisher pipeline to create an output instance. 
diff --git a/libbeat/publisher/pipeline/nilpipeline.go b/libbeat/publisher/pipeline/nilpipeline.go index f32785a8d22..cf1b276db91 100644 --- a/libbeat/publisher/pipeline/nilpipeline.go +++ b/libbeat/publisher/pipeline/nilpipeline.go @@ -17,7 +17,9 @@ package pipeline -import "github.com/elastic/beats/v7/libbeat/beat" +import ( + "github.com/elastic/beats/v7/libbeat/beat" +) type nilPipeline struct{} diff --git a/libbeat/publisher/pipeline/output.go b/libbeat/publisher/pipeline/output.go index fa2ce73a28c..ffc5acfa6ad 100644 --- a/libbeat/publisher/pipeline/output.go +++ b/libbeat/publisher/pipeline/output.go @@ -18,6 +18,13 @@ package pipeline import ( + "context" + "fmt" + + "github.com/elastic/beats/v7/libbeat/publisher" + + "go.elastic.co/apm" + "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/libbeat/outputs" ) @@ -43,9 +50,11 @@ type netClientWorker struct { batchSize int batchSizer func() int logger *logp.Logger + + tracer *apm.Tracer } -func makeClientWorker(observer outputObserver, qu workQueue, client outputs.Client) outputWorker { +func makeClientWorker(observer outputObserver, qu workQueue, client outputs.Client, tracer *apm.Tracer) outputWorker { w := worker{ observer: observer, qu: qu, @@ -62,6 +71,7 @@ func makeClientWorker(observer outputObserver, qu workQueue, client outputs.Clie worker: w, client: nc, logger: logp.NewLogger("publisher_pipeline_output"), + tracer: tracer, } } else { c = &clientWorker{worker: w, client: client} @@ -94,8 +104,7 @@ func (w *clientWorker) run() { continue } w.observer.outBatchSend(len(batch.Events())) - - if err := w.client.Publish(batch); err != nil { + if err := w.client.Publish(context.TODO(), batch); err != nil { return } } @@ -150,11 +159,28 @@ func (w *netClientWorker) run() { continue } - if err := w.client.Publish(batch); err != nil { - w.logger.Errorf("Failed to publish events: %v", err) - // on error return to connect loop + if err := 
w.publishBatch(batch); err != nil { connected = false } } } } + +func (w *netClientWorker) publishBatch(batch publisher.Batch) error { + ctx := context.Background() + if w.tracer != nil { + tx := w.tracer.StartTransaction("publish", "output") + defer tx.End() + tx.Context.SetLabel("worker", "netclient") + ctx = apm.ContextWithTransaction(ctx, tx) + } + err := w.client.Publish(ctx, batch) + if err != nil { + err = fmt.Errorf("failed to publish events: %w", err) + apm.CaptureError(ctx, err).Send() + w.logger.Error(err) + // on error return to connect loop + return err + } + return nil +} diff --git a/libbeat/publisher/pipeline/output_test.go b/libbeat/publisher/pipeline/output_test.go index 5f471ddf396..36c138a6d01 100644 --- a/libbeat/publisher/pipeline/output_test.go +++ b/libbeat/publisher/pipeline/output_test.go @@ -24,6 +24,8 @@ import ( "testing/quick" "time" + "go.elastic.co/apm/apmtest" + "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/common/atomic" @@ -58,7 +60,7 @@ func TestMakeClientWorker(t *testing.T) { client := ctor(publishFn) - worker := makeClientWorker(nilObserver, wqu, client) + worker := makeClientWorker(nilObserver, wqu, client, nil) defer worker.Close() for i := uint(0); i < numBatches; i++ { @@ -137,7 +139,7 @@ func TestReplaceClientWorker(t *testing.T) { } client := ctor(blockingPublishFn) - worker := makeClientWorker(nilObserver, wqu, client) + worker := makeClientWorker(nilObserver, wqu, client, nil) // Allow the worker to make *some* progress before we close it timeout := 10 * time.Second @@ -162,7 +164,7 @@ func TestReplaceClientWorker(t *testing.T) { } client = ctor(countingPublishFn) - makeClientWorker(nilObserver, wqu, client) + makeClientWorker(nilObserver, wqu, client, nil) wg.Wait() // Make sure that all events have eventually been published @@ -178,3 +180,52 @@ func TestReplaceClientWorker(t *testing.T) { }) } } + +func TestMakeClientTracer(t *testing.T) { + seedPRNG(t) + + numBatches := 
10 + numEvents := atomic.MakeUint(0) + + wqu := makeWorkQueue() + retryer := newRetryer(logp.NewLogger("test"), nilObserver, wqu, nil) + defer retryer.close() + + var published atomic.Uint + publishFn := func(batch publisher.Batch) error { + published.Add(uint(len(batch.Events()))) + return nil + } + + client := newMockNetworkClient(publishFn) + + recorder := apmtest.NewRecordingTracer() + defer recorder.Close() + + worker := makeClientWorker(nilObserver, wqu, client, recorder.Tracer) + defer worker.Close() + + for i := 0; i < numBatches; i++ { + batch := randomBatch(10, 15).withRetryer(retryer) + numEvents.Add(uint(len(batch.Events()))) + wqu <- batch + } + + // Give some time for events to be published + timeout := 10 * time.Second + + // Make sure that all events have eventually been published + matches := waitUntilTrue(timeout, func() bool { + return numEvents == published + }) + if !matches { + t.Errorf("expected %d events, got %d", numEvents, published) + } + recorder.Flush(nil) + + apmEvents := recorder.Payloads() + transactions := apmEvents.Transactions + if len(transactions) != numBatches { + t.Errorf("expected %d traces, got %d", numBatches, len(transactions)) + } +} diff --git a/libbeat/publisher/pipeline/stress/out.go b/libbeat/publisher/pipeline/stress/out.go index 692d62f98ab..00afb1ac74e 100644 --- a/libbeat/publisher/pipeline/stress/out.go +++ b/libbeat/publisher/pipeline/stress/out.go @@ -18,6 +18,7 @@ package stress import ( + "context" "math/rand" "time" @@ -70,7 +71,7 @@ func makeTestOutput(_ outputs.IndexManager, beat beat.Info, observer outputs.Obs func (*testOutput) Close() error { return nil } -func (t *testOutput) Publish(batch publisher.Batch) error { +func (t *testOutput) Publish(_ context.Context, batch publisher.Batch) error { config := &t.config n := len(batch.Events()) diff --git a/libbeat/publisher/pipeline/testing.go b/libbeat/publisher/pipeline/testing.go index 0db2780ba56..5534c0ce3b4 100644 --- 
a/libbeat/publisher/pipeline/testing.go +++ b/libbeat/publisher/pipeline/testing.go @@ -18,6 +18,7 @@ package pipeline import ( + "context" "flag" "math/rand" "sync" @@ -45,7 +46,7 @@ type mockClient struct { func (c *mockClient) String() string { return "mock_client" } func (c *mockClient) Close() error { return nil } -func (c *mockClient) Publish(batch publisher.Batch) error { +func (c *mockClient) Publish(_ context.Context, batch publisher.Batch) error { return c.publishFn(batch) } diff --git a/vendor/github.com/armon/go-radix/.gitignore b/vendor/github.com/armon/go-radix/.gitignore new file mode 100644 index 00000000000..00268614f04 --- /dev/null +++ b/vendor/github.com/armon/go-radix/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/armon/go-radix/.travis.yml b/vendor/github.com/armon/go-radix/.travis.yml new file mode 100644 index 00000000000..1a0bbea6c77 --- /dev/null +++ b/vendor/github.com/armon/go-radix/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: + - tip diff --git a/vendor/github.com/armon/go-radix/LICENSE b/vendor/github.com/armon/go-radix/LICENSE new file mode 100644 index 00000000000..a5df10e675d --- /dev/null +++ b/vendor/github.com/armon/go-radix/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the 
Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-radix/README.md b/vendor/github.com/armon/go-radix/README.md new file mode 100644 index 00000000000..26f42a2837c --- /dev/null +++ b/vendor/github.com/armon/go-radix/README.md @@ -0,0 +1,38 @@ +go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix) +========= + +Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree). +The package only provides a single `Tree` implementation, optimized for sparse nodes. + +As a radix tree, it provides the following: + * O(k) operations. In many cases, this can be faster than a hash table since + the hash function is an O(k) operation, and hash tables have very poor cache locality. + * Minimum / Maximum value lookups + * Ordered iteration + +For an immutable variant, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix). + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix). 
+ +Example +======= + +Below is a simple example of usage + +```go +// Create a tree +r := radix.New() +r.Insert("foo", 1) +r.Insert("bar", 2) +r.Insert("foobar", 2) + +// Find the longest prefix match +m, _, _ := r.LongestPrefix("foozip") +if m != "foo" { + panic("should be foo") +} +``` + diff --git a/vendor/github.com/armon/go-radix/go.mod b/vendor/github.com/armon/go-radix/go.mod new file mode 100644 index 00000000000..4336aa29ea2 --- /dev/null +++ b/vendor/github.com/armon/go-radix/go.mod @@ -0,0 +1 @@ +module github.com/armon/go-radix diff --git a/vendor/github.com/armon/go-radix/radix.go b/vendor/github.com/armon/go-radix/radix.go new file mode 100644 index 00000000000..e2bb22eb91d --- /dev/null +++ b/vendor/github.com/armon/go-radix/radix.go @@ -0,0 +1,540 @@ +package radix + +import ( + "sort" + "strings" +) + +// WalkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated. +type WalkFn func(s string, v interface{}) bool + +// leafNode is used to represent a value +type leafNode struct { + key string + val interface{} +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *node +} + +type node struct { + // leaf is used to store possible leaf + leaf *leafNode + + // prefix is the common prefix we ignore + prefix string + + // Edges should be stored in-order for iteration. 
+ // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *node) isLeaf() bool { + return n.leaf != nil +} + +func (n *node) addEdge(e edge) { + n.edges = append(n.edges, e) + n.edges.Sort() +} + +func (n *node) updateEdge(label byte, node *node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + n.edges[idx].node = node + return + } + panic("replacing missing edge") +} + +func (n *node) getEdge(label byte) *node { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return n.edges[idx].node + } + return nil +} + +func (n *node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +type edges []edge + +func (e edges) Len() int { + return len(e) +} + +func (e edges) Less(i, j int) bool { + return e[i].label < e[j].label +} + +func (e edges) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e edges) Sort() { + sort.Sort(e) +} + +// Tree implements a radix tree. This can be treated as a +// Dictionary abstract data type. 
The main advantage over +// a standard hash map is prefix-based lookups and +// ordered iteration, +type Tree struct { + root *node + size int +} + +// New returns an empty Tree +func New() *Tree { + return NewFromMap(nil) +} + +// NewFromMap returns a new tree containing the keys +// from an existing map +func NewFromMap(m map[string]interface{}) *Tree { + t := &Tree{root: &node{}} + for k, v := range m { + t.Insert(k, v) + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + return t.size +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 string) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +// Insert is used to add a newentry or update +// an existing entry. Returns if updated. +func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) { + var parent *node + n := t.root + search := s + for { + // Handle key exhaution + if len(search) == 0 { + if n.isLeaf() { + old := n.leaf.val + n.leaf.val = v + return old, true + } + + n.leaf = &leafNode{ + key: s, + val: v, + } + t.size++ + return nil, false + } + + // Look for the edge + parent = n + n = n.getEdge(search[0]) + + // No edge, create one + if n == nil { + e := edge{ + label: search[0], + node: &node{ + leaf: &leafNode{ + key: s, + val: v, + }, + prefix: search, + }, + } + parent.addEdge(e) + t.size++ + return nil, false + } + + // Determine longest prefix of the search key on match + commonPrefix := longestPrefix(search, n.prefix) + if commonPrefix == len(n.prefix) { + search = search[commonPrefix:] + continue + } + + // Split the node + t.size++ + child := &node{ + prefix: search[:commonPrefix], + } + parent.updateEdge(search[0], child) + + // Restore the existing node + child.addEdge(edge{ + label: n.prefix[commonPrefix], + node: n, + }) + n.prefix = n.prefix[commonPrefix:] + + 
// Create a new leaf node + leaf := &leafNode{ + key: s, + val: v, + } + + // If the new key is a subset, add to to this node + search = search[commonPrefix:] + if len(search) == 0 { + child.leaf = leaf + return nil, false + } + + // Create a new edge for the node + child.addEdge(edge{ + label: search[0], + node: &node{ + leaf: leaf, + prefix: search, + }, + }) + return nil, false + } +} + +// Delete is used to delete a key, returning the previous +// value and if it was deleted +func (t *Tree) Delete(s string) (interface{}, bool) { + var parent *node + var label byte + n := t.root + search := s + for { + // Check for key exhaution + if len(search) == 0 { + if !n.isLeaf() { + break + } + goto DELETE + } + + // Look for an edge + parent = n + label = search[0] + n = n.getEdge(label) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return nil, false + +DELETE: + // Delete the leaf + leaf := n.leaf + n.leaf = nil + t.size-- + + // Check if we should delete this node from the parent + if parent != nil && len(n.edges) == 0 { + parent.delEdge(label) + } + + // Check if we should merge this node + if n != t.root && len(n.edges) == 1 { + n.mergeChild() + } + + // Check if we should merge the parent's other child + if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { + parent.mergeChild() + } + + return leaf.val, true +} + +// DeletePrefix is used to delete the subtree under a prefix +// Returns how many nodes were deleted +// Use this to delete large subtrees efficiently +func (t *Tree) DeletePrefix(s string) int { + return t.deletePrefix(nil, t.root, s) +} + +// delete does a recursive deletion +func (t *Tree) deletePrefix(parent, n *node, prefix string) int { + // Check for key exhaustion + if len(prefix) == 0 { + // Remove the leaf node + subTreeSize := 0 + //recursively walk from all edges of the node to be deleted + 
recursiveWalk(n, func(s string, v interface{}) bool { + subTreeSize++ + return false + }) + if n.isLeaf() { + n.leaf = nil + } + n.edges = nil // deletes the entire subtree + + // Check if we should merge the parent's other child + if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { + parent.mergeChild() + } + t.size -= subTreeSize + return subTreeSize + } + + // Look for an edge + label := prefix[0] + child := n.getEdge(label) + if child == nil || (!strings.HasPrefix(child.prefix, prefix) && !strings.HasPrefix(prefix, child.prefix)) { + return 0 + } + + // Consume the search prefix + if len(child.prefix) > len(prefix) { + prefix = prefix[len(prefix):] + } else { + prefix = prefix[len(child.prefix):] + } + return t.deletePrefix(n, child, prefix) +} + +func (n *node) mergeChild() { + e := n.edges[0] + child := e.node + n.prefix = n.prefix + child.prefix + n.leaf = child.leaf + n.edges = child.edges +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Tree) Get(s string) (interface{}, bool) { + n := t.root + search := s + for { + // Check for key exhaution + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.val, true + } + break + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return nil, false +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. 
+func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) { + var last *leafNode + n := t.root + search := s + for { + // Look for a leaf node + if n.isLeaf() { + last = n.leaf + } + + // Check for key exhaution + if len(search) == 0 { + break + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + if last != nil { + return last.key, last.val, true + } + return "", nil, false +} + +// Minimum is used to return the minimum value in the tree +func (t *Tree) Minimum() (string, interface{}, bool) { + n := t.root + for { + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + if len(n.edges) > 0 { + n = n.edges[0].node + } else { + break + } + } + return "", nil, false +} + +// Maximum is used to return the maximum value in the tree +func (t *Tree) Maximum() (string, interface{}, bool) { + n := t.root + for { + if num := len(n.edges); num > 0 { + n = n.edges[num-1].node + continue + } + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + break + } + return "", nil, false +} + +// Walk is used to walk the tree +func (t *Tree) Walk(fn WalkFn) { + recursiveWalk(t.root, fn) +} + +// WalkPrefix is used to walk the tree under a prefix +func (t *Tree) WalkPrefix(prefix string, fn WalkFn) { + n := t.root + search := prefix + for { + // Check for key exhaution + if len(search) == 0 { + recursiveWalk(n, fn) + return + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if strings.HasPrefix(n.prefix, search) { + // Child may be under our search prefix + recursiveWalk(n, fn) + return + } else { + break + } + } + +} + +// WalkPath is used to walk the tree, but only visiting nodes +// from the root down to a given leaf. 
Where WalkPrefix walks +// all the entries *under* the given prefix, this walks the +// entries *above* the given prefix. +func (t *Tree) WalkPath(path string, fn WalkFn) { + n := t.root + search := path + for { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return + } + + // Check for key exhaution + if len(search) == 0 { + return + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + return + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } +} + +// recursiveWalk is used to do a pre-order walk of a node +// recursively. Returns true if the walk should be aborted +func recursiveWalk(n *node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} + +// ToMap is used to walk the tree and convert it into a map +func (t *Tree) ToMap() map[string]interface{} { + out := make(map[string]interface{}, t.size) + t.Walk(func(k string, v interface{}) bool { + out[k] = v + return false + }) + return out +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/.travis.yml b/vendor/github.com/santhosh-tekuri/jsonschema/.travis.yml new file mode 100644 index 00000000000..1ab35ab1b1f --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/.travis.yml @@ -0,0 +1,10 @@ +language: go + +go: + - 1.8.1 + +script: + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/LICENSE b/vendor/github.com/santhosh-tekuri/jsonschema/LICENSE new file mode 100644 index 00000000000..65cd403ab0e --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 Santhosh Kumar 
Tekuri. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/README.md b/vendor/github.com/santhosh-tekuri/jsonschema/README.md new file mode 100644 index 00000000000..3d369d71d74 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/README.md @@ -0,0 +1,148 @@ +# jsonschema + +[![License](https://img.shields.io/badge/License-BSD%203--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) +[![GoDoc](https://godoc.org/github.com/santhosh-tekuri/jsonschema?status.svg)](https://godoc.org/github.com/santhosh-tekuri/jsonschema) +[![Go Report Card](https://goreportcard.com/badge/github.com/santhosh-tekuri/jsonschema)](https://goreportcard.com/report/github.com/santhosh-tekuri/jsonschema) +[![Build Status](https://travis-ci.org/santhosh-tekuri/jsonschema.svg?branch=master)](https://travis-ci.org/santhosh-tekuri/jsonschema) +[![codecov.io](https://codecov.io/github/santhosh-tekuri/jsonschema/coverage.svg?branch=master)](https://codecov.io/github/santhosh-tekuri/jsonschema?branch=master) + +Package jsonschema provides json-schema compilation and validation. + +This implementation of JSON Schema, supports draft4, draft6 and draft7. + +Passes all tests(including optional) in https://github.com/json-schema/JSON-Schema-Test-Suite + +An example of using this package: + +```go +schema, err := jsonschema.Compile("schemas/purchaseOrder.json") +if err != nil { + return err +} +f, err := os.Open("purchaseOrder.json") +if err != nil { + return err +} +defer f.Close() +if err = schema.Validate(f); err != nil { + return err +} +``` + +The schema is compiled against the version specified in `$schema` property. +If `$schema` property is missing, it uses latest draft which currently is draft7. 
+You can force to use draft4 when `$schema` is missing, as follows: + +```go +compiler := jsonschema.NewCompiler() +compler.Draft = jsonschema.Draft4 +``` + +you can also validate go value using `schema.ValidateInterface(interface{})` method. +but the argument should not be user-defined struct. + + +This package supports loading json-schema from filePath and fileURL. + +To load json-schema from HTTPURL, add following import: + +```go +import _ "github.com/santhosh-tekuri/jsonschema/httploader" +``` + +Loading from urls for other schemes (such as ftp), can be plugged in. see package jsonschema/httploader +for an example + +To load json-schema from in-memory: + +```go +data := `{"type": "string"}` +url := "sch.json" +compiler := jsonschema.NewCompiler() +if err := compiler.AddResource(url, strings.NewReader(data)); err != nil { + return err +} +schema, err := compiler.Compile(url) +if err != nil { + return err +} +f, err := os.Open("doc.json") +if err != nil { + return err +} +defer f.Close() +if err = schema.Validate(f); err != nil { + return err +} +``` + +This package supports json string formats: +- date-time +- date +- time +- hostname +- email +- ip-address +- ipv4 +- ipv6 +- uri +- uriref/uri-reference +- regex +- format +- json-pointer +- relative-json-pointer +- uri-template (limited validation) + +Developers can register their own formats using package "github.com/santhosh-tekuri/jsonschema/formats". + +"base64" contentEncoding is supported. Custom decoders can be registered using package "github.com/santhosh-tekuri/jsonschema/decoders". + +"application/json" contentMediaType is supported. Custom mediatypes can be registered using package "github.com/santhosh-tekuri/jsonschema/mediatypes". + +## ValidationError + +The ValidationError returned by Validate method contains detailed context to understand why and where the error is. 
+ +schema.json: +```json +{ + "$ref": "t.json#/definitions/employee" +} +``` + +t.json: +```json +{ + "definitions": { + "employee": { + "type": "string" + } + } +} +``` + +doc.json: +```json +1 +``` + +Validating `doc.json` with `schema.json`, gives following ValidationError: +``` +I[#] S[#] doesn't validate with "schema.json#" + I[#] S[#/$ref] doesn't valide with "t.json#/definitions/employee" + I[#] S[#/definitions/employee/type] expected string, but got number +``` + +Here `I` stands for instance document and `S` stands for schema document. +The json-fragments that caused error in instance and schema documents are represented using json-pointer notation. +Nested causes are printed with indent. + +## CLI + +```bash +jv []... +``` + +if no `` arguments are passed, it simply validates the ``. + +exit-code is 1, if there are any validation errors diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/compiler.go b/vendor/github.com/santhosh-tekuri/jsonschema/compiler.go new file mode 100644 index 00000000000..9ffb715ab05 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/compiler.go @@ -0,0 +1,534 @@ +// Copyright 2017 Santhosh Kumar Tekuri. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package jsonschema + +import ( + "encoding/json" + "fmt" + "io" + "math/big" + "regexp" + "strings" + + "github.com/santhosh-tekuri/jsonschema/decoders" + "github.com/santhosh-tekuri/jsonschema/formats" + "github.com/santhosh-tekuri/jsonschema/loader" + "github.com/santhosh-tekuri/jsonschema/mediatypes" +) + +func init() { + formats.Register("encoding", func(s string) bool { + _, ok := decoders.Get(s) + return ok + }) + formats.Register("mediatype", func(s string) bool { + _, ok := mediatypes.Get(s) + return ok + }) +} + +// A Draft represents json-schema draft +type Draft struct { + meta *Schema + id string // property name used to represent schema id. + version int +} + +var latest = Draft7 + +func (draft *Draft) validateSchema(url, ptr string, v interface{}) error { + if meta := draft.meta; meta != nil { + if err := meta.validate(v); err != nil { + addContext(ptr, "", err) + finishSchemaContext(err, meta) + finishInstanceContext(err) + var instancePtr string + if ptr == "" { + instancePtr = "#" + } else { + instancePtr = "#/" + ptr + } + return &SchemaError{ + url, + &ValidationError{ + Message: fmt.Sprintf("doesn't validate with %q", meta.URL+meta.Ptr), + InstancePtr: instancePtr, + SchemaURL: meta.URL, + SchemaPtr: "#", + Causes: []*ValidationError{err.(*ValidationError)}, + }, + } + } + } + return nil +} + +// A Compiler represents a json-schema compiler. +// +// Currently draft4, draft6 and draft7 are supported +type Compiler struct { + // Draft represents the draft used when '$schema' attribute is missing. + // + // This defaults to latest draft (currently draft7). + Draft *Draft + resources map[string]*resource + + // ExtractAnnotations tells whether schema annotations has to be extracted + // in compiled Schema or not. + ExtractAnnotations bool +} + +// NewCompiler returns a draft7 json-schema Compiler object. 
+func NewCompiler() *Compiler { + return &Compiler{Draft: latest, resources: make(map[string]*resource)} +} + +// AddResource adds in-memory resource to the compiler. +// +// Note that url must not have fragment +func (c *Compiler) AddResource(url string, r io.Reader) error { + res, err := newResource(url, r) + if err != nil { + return err + } + c.resources[res.url] = res + return nil +} + +// MustCompile is like Compile but panics if the url cannot be compiled to *Schema. +// It simplifies safe initialization of global variables holding compiled Schemas. +func (c *Compiler) MustCompile(url string) *Schema { + s, err := c.Compile(url) + if err != nil { + panic(fmt.Sprintf("jsonschema: Compile(%q): %s", url, err)) + } + return s +} + +// Compile parses json-schema at given url returns, if successful, +// a Schema object that can be used to match against json. +func (c *Compiler) Compile(url string) (*Schema, error) { + base, fragment := split(url) + if _, ok := c.resources[base]; !ok { + r, err := loader.Load(base) + if err != nil { + return nil, err + } + defer r.Close() + if err := c.AddResource(base, r); err != nil { + return nil, err + } + } + r := c.resources[base] + if r.draft == nil { + if m, ok := r.doc.(map[string]interface{}); ok { + if url, ok := m["$schema"]; ok { + switch url { + case "http://json-schema.org/schema#": + r.draft = latest + case "http://json-schema.org/draft-07/schema#": + r.draft = Draft7 + case "http://json-schema.org/draft-06/schema#": + r.draft = Draft6 + case "http://json-schema.org/draft-04/schema#": + r.draft = Draft4 + default: + return nil, fmt.Errorf("unknown $schema %q", url) + } + } + } + if r.draft == nil { + r.draft = c.Draft + } + } + return c.compileRef(r, r.url, fragment) +} + +func (c Compiler) compileRef(r *resource, base, ref string) (*Schema, error) { + var err error + if rootFragment(ref) { + if _, ok := r.schemas["#"]; !ok { + if err := r.draft.validateSchema(r.url, "", r.doc); err != nil { + return nil, err + } + s 
:= &Schema{URL: r.url, Ptr: "#"} + r.schemas["#"] = s + if m, ok := r.doc.(map[string]interface{}); ok { + if _, err := c.compile(r, s, base, m); err != nil { + return nil, err + } + } else { + if _, err := c.compile(r, s, base, r.doc); err != nil { + return nil, err + } + } + } + return r.schemas["#"], nil + } + + if strings.HasPrefix(ref, "#/") { + if _, ok := r.schemas[ref]; !ok { + ptrBase, doc, err := r.resolvePtr(ref) + if err != nil { + return nil, err + } + if err := r.draft.validateSchema(r.url, strings.TrimPrefix(ref, "#/"), doc); err != nil { + return nil, err + } + r.schemas[ref] = &Schema{URL: base, Ptr: ref} + if _, err := c.compile(r, r.schemas[ref], ptrBase, doc); err != nil { + return nil, err + } + } + return r.schemas[ref], nil + } + + refURL, err := resolveURL(base, ref) + if err != nil { + return nil, err + } + if rs, ok := r.schemas[refURL]; ok { + return rs, nil + } + + ids := make(map[string]map[string]interface{}) + if err := resolveIDs(r.draft, r.url, r.doc, ids); err != nil { + return nil, err + } + if v, ok := ids[refURL]; ok { + if err := r.draft.validateSchema(r.url, "", v); err != nil { + return nil, err + } + u, f := split(refURL) + s := &Schema{URL: u, Ptr: f} + r.schemas[refURL] = s + if err := c.compileMap(r, s, refURL, v); err != nil { + return nil, err + } + return s, nil + } + + base, _ = split(refURL) + if base == r.url { + return nil, fmt.Errorf("invalid ref: %q", refURL) + } + return c.Compile(refURL) +} + +func (c Compiler) compile(r *resource, s *Schema, base string, m interface{}) (*Schema, error) { + if s == nil { + s = new(Schema) + s.URL, _ = split(base) + } + switch m := m.(type) { + case bool: + s.Always = &m + return s, nil + default: + return s, c.compileMap(r, s, base, m.(map[string]interface{})) + } +} + +func (c Compiler) compileMap(r *resource, s *Schema, base string, m map[string]interface{}) error { + var err error + + if id, ok := m[r.draft.id]; ok { + if base, err = resolveURL(base, id.(string)); err != nil 
{ + return err + } + } + + if ref, ok := m["$ref"]; ok { + b, _ := split(base) + s.Ref, err = c.compileRef(r, b, ref.(string)) + if err != nil { + return err + } + // All other properties in a "$ref" object MUST be ignored + return nil + } + + if t, ok := m["type"]; ok { + switch t := t.(type) { + case string: + s.Types = []string{t} + case []interface{}: + s.Types = toStrings(t) + } + } + + if e, ok := m["enum"]; ok { + s.Enum = e.([]interface{}) + allPrimitives := true + for _, item := range s.Enum { + switch jsonType(item) { + case "object", "array": + allPrimitives = false + break + } + } + s.enumError = "enum failed" + if allPrimitives { + if len(s.Enum) == 1 { + s.enumError = fmt.Sprintf("value must be %#v", s.Enum[0]) + } else { + strEnum := make([]string, len(s.Enum)) + for i, item := range s.Enum { + strEnum[i] = fmt.Sprintf("%#v", item) + } + s.enumError = fmt.Sprintf("value must be one of %s", strings.Join(strEnum, ", ")) + } + } + } + + loadSchema := func(pname string) (*Schema, error) { + if pvalue, ok := m[pname]; ok { + return c.compile(r, nil, base, pvalue) + } + return nil, nil + } + + if s.Not, err = loadSchema("not"); err != nil { + return err + } + + loadSchemas := func(pname string) ([]*Schema, error) { + if pvalue, ok := m[pname]; ok { + pvalue := pvalue.([]interface{}) + schemas := make([]*Schema, len(pvalue)) + for i, v := range pvalue { + sch, err := c.compile(r, nil, base, v) + if err != nil { + return nil, err + } + schemas[i] = sch + } + return schemas, nil + } + return nil, nil + } + if s.AllOf, err = loadSchemas("allOf"); err != nil { + return err + } + if s.AnyOf, err = loadSchemas("anyOf"); err != nil { + return err + } + if s.OneOf, err = loadSchemas("oneOf"); err != nil { + return err + } + + loadInt := func(pname string) int { + if num, ok := m[pname]; ok { + i, _ := num.(json.Number).Int64() + return int(i) + } + return -1 + } + s.MinProperties, s.MaxProperties = loadInt("minProperties"), loadInt("maxProperties") + + if req, ok 
:= m["required"]; ok { + s.Required = toStrings(req.([]interface{})) + } + + if props, ok := m["properties"]; ok { + props := props.(map[string]interface{}) + s.Properties = make(map[string]*Schema, len(props)) + for pname, pmap := range props { + s.Properties[pname], err = c.compile(r, nil, base, pmap) + if err != nil { + return err + } + } + } + + if regexProps, ok := m["regexProperties"]; ok { + s.RegexProperties = regexProps.(bool) + } + + if patternProps, ok := m["patternProperties"]; ok { + patternProps := patternProps.(map[string]interface{}) + s.PatternProperties = make(map[*regexp.Regexp]*Schema, len(patternProps)) + for pattern, pmap := range patternProps { + s.PatternProperties[regexp.MustCompile(pattern)], err = c.compile(r, nil, base, pmap) + if err != nil { + return err + } + } + } + + if additionalProps, ok := m["additionalProperties"]; ok { + switch additionalProps := additionalProps.(type) { + case bool: + if !additionalProps { + s.AdditionalProperties = false + } + case map[string]interface{}: + s.AdditionalProperties, err = c.compile(r, nil, base, additionalProps) + if err != nil { + return err + } + } + } + + if deps, ok := m["dependencies"]; ok { + deps := deps.(map[string]interface{}) + s.Dependencies = make(map[string]interface{}, len(deps)) + for pname, pvalue := range deps { + switch pvalue := pvalue.(type) { + case []interface{}: + s.Dependencies[pname] = toStrings(pvalue) + default: + s.Dependencies[pname], err = c.compile(r, nil, base, pvalue) + if err != nil { + return err + } + } + } + } + + s.MinItems, s.MaxItems = loadInt("minItems"), loadInt("maxItems") + + if unique, ok := m["uniqueItems"]; ok { + s.UniqueItems = unique.(bool) + } + + if items, ok := m["items"]; ok { + switch items := items.(type) { + case []interface{}: + s.Items, err = loadSchemas("items") + if err != nil { + return err + } + if additionalItems, ok := m["additionalItems"]; ok { + switch additionalItems := additionalItems.(type) { + case bool: + s.AdditionalItems 
= additionalItems + case map[string]interface{}: + s.AdditionalItems, err = c.compile(r, nil, base, additionalItems) + if err != nil { + return err + } + } + } else { + s.AdditionalItems = true + } + default: + s.Items, err = c.compile(r, nil, base, items) + if err != nil { + return err + } + } + } + + s.MinLength, s.MaxLength = loadInt("minLength"), loadInt("maxLength") + + if pattern, ok := m["pattern"]; ok { + s.Pattern = regexp.MustCompile(pattern.(string)) + } + + if format, ok := m["format"]; ok { + s.FormatName = format.(string) + s.Format, _ = formats.Get(s.FormatName) + } + + loadFloat := func(pname string) *big.Float { + if num, ok := m[pname]; ok { + r, _ := new(big.Float).SetString(string(num.(json.Number))) + return r + } + return nil + } + + s.Minimum = loadFloat("minimum") + if exclusive, ok := m["exclusiveMinimum"]; ok { + if exclusive, ok := exclusive.(bool); ok { + if exclusive { + s.Minimum, s.ExclusiveMinimum = nil, s.Minimum + } + } else { + s.ExclusiveMinimum = loadFloat("exclusiveMinimum") + } + } + + s.Maximum = loadFloat("maximum") + if exclusive, ok := m["exclusiveMaximum"]; ok { + if exclusive, ok := exclusive.(bool); ok { + if exclusive { + s.Maximum, s.ExclusiveMaximum = nil, s.Maximum + } + } else { + s.ExclusiveMaximum = loadFloat("exclusiveMaximum") + } + } + + s.MultipleOf = loadFloat("multipleOf") + + if c.ExtractAnnotations { + if title, ok := m["title"]; ok { + s.Title = title.(string) + } + if description, ok := m["description"]; ok { + s.Description = description.(string) + } + s.Default = m["default"] + } + + if r.draft.version >= 6 { + if c, ok := m["const"]; ok { + s.Constant = []interface{}{c} + } + if s.PropertyNames, err = loadSchema("propertyNames"); err != nil { + return err + } + if s.Contains, err = loadSchema("contains"); err != nil { + return err + } + } + + if r.draft.version >= 7 { + if m["if"] != nil && (m["then"] != nil || m["else"] != nil) { + if s.If, err = loadSchema("if"); err != nil { + return err + } + if 
s.Then, err = loadSchema("then"); err != nil { + return err + } + if s.Else, err = loadSchema("else"); err != nil { + return err + } + + if c.ExtractAnnotations { + if readOnly, ok := m["readOnly"]; ok { + s.ReadOnly = readOnly.(bool) + } + if writeOnly, ok := m["writeOnly"]; ok { + s.WriteOnly = writeOnly.(bool) + } + if examples, ok := m["examples"]; ok { + s.Examples = examples.([]interface{}) + } + } + } + + if encoding, ok := m["contentEncoding"]; ok { + s.ContentEncoding = encoding.(string) + s.Decoder, _ = decoders.Get(s.ContentEncoding) + } + if mediaType, ok := m["contentMediaType"]; ok { + s.ContentMediaType = mediaType.(string) + s.MediaType, _ = mediatypes.Get(s.ContentMediaType) + } + } + + return nil +} + +func toStrings(arr []interface{}) []string { + s := make([]string, len(arr)) + for i, v := range arr { + s[i] = v.(string) + } + return s +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/decoders/decoders.go b/vendor/github.com/santhosh-tekuri/jsonschema/decoders/decoders.go new file mode 100644 index 00000000000..4a1edc43c57 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/decoders/decoders.go @@ -0,0 +1,32 @@ +// Copyright 2017 Santhosh Kumar Tekuri. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package decoders provides functions to decode encoded-string. +// +// It allows developers to register custom encodings, that can be used +// in json-schema for validation. +package decoders + +import ( + "encoding/base64" +) + +// The Decoder type is a function, that returns +// the bytes represented by encoded string. +type Decoder func(string) ([]byte, error) + +var decoders = map[string]Decoder{ + "base64": base64.StdEncoding.DecodeString, +} + +// Register registers Decoder object for given encoding. 
+func Register(name string, d Decoder) { + decoders[name] = d +} + +// Get returns Decoder object for given encoding, if found. +func Get(name string) (Decoder, bool) { + d, ok := decoders[name] + return d, ok +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/doc.go b/vendor/github.com/santhosh-tekuri/jsonschema/doc.go new file mode 100644 index 00000000000..ea444425313 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/doc.go @@ -0,0 +1,77 @@ +// Copyright 2017 Santhosh Kumar Tekuri. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package jsonschema provides json-schema compilation and validation. + +This implementation of JSON Schema, supports draft4, draft6 and draft7. +Passes all tests(including optional) in https://github.com/json-schema/JSON-Schema-Test-Suite + +An example of using this package: + + schema, err := jsonschema.Compile("schemas/purchaseOrder.json") + if err != nil { + return err + } + f, err := os.Open("purchaseOrder.json") + if err != nil { + return err + } + defer f.Close() + if err = schema.Validate(f); err != nil { + return err + } + +The schema is compiled against the version specified in `$schema` property. +If `$schema` property is missing, it uses latest draft which currently is draft7. +You can force to use draft4 when `$schema` is missing, as follows: + + compiler := jsonschema.NewCompiler() + compler.Draft = jsonschema.Draft4 + +you can also validate go value using schema.ValidateInterface(interface{}) method. +but the argument should not be user-defined struct. + +This package supports loading json-schema from filePath and fileURL. + +To load json-schema from HTTPURL, add following import: + + import _ "github.com/santhosh-tekuri/jsonschema/httploader" + +Loading from urls for other schemes (such as ftp), can be plugged in. 
see package jsonschema/httploader +for an example + +To load json-schema from in-memory: + + data := `{"type": "string"}` + url := "sch.json" + compiler := jsonschema.NewCompiler() + if err := compiler.AddResource(url, strings.NewReader(data)); err != nil { + return err + } + schema, err := compiler.Compile(url) + if err != nil { + return err + } + f, err := os.Open("doc.json") + if err != nil { + return err + } + defer f.Close() + if err = schema.Validate(f); err != nil { + return err + } + +This package supports json string formats: date-time, date, time, hostname, email, ip-address, ipv4, ipv6, uri, uriref, regex, +format, json-pointer, relative-json-pointer, uri-template (limited validation). Developers can register their own formats using +package "github.com/santhosh-tekuri/jsonschema/formats". + +"base64" contentEncoding is supported. Custom decoders can be registered using package "github.com/santhosh-tekuri/jsonschema/decoders". + +"application/json" contentMediaType is supported. Custom mediatypes can be registered using package "github.com/santhosh-tekuri/jsonschema/mediatypes". + +The ValidationError returned by Validate method contains detailed context to understand why and where the error is. + +*/ +package jsonschema diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/draft4.go b/vendor/github.com/santhosh-tekuri/jsonschema/draft4.go new file mode 100644 index 00000000000..6c787bc1a76 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/draft4.go @@ -0,0 +1,172 @@ +// Copyright 2017 Santhosh Kumar Tekuri. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package jsonschema + +import "strings" + +// Draft4 respresents http://json-schema.org/specification-links.html#draft-4 +var Draft4 = &Draft{id: "id", version: 4} + +func init() { + c := NewCompiler() + url := "http://json-schema.org/draft-04/schema" + err := c.AddResource(url, strings.NewReader(`{ + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uriref" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + 
"uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "regexProperties": true, + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "regexProperties": { "type": "boolean" }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" }, + "format": { "type": "string", "format": "format" }, + "$ref": { "type": "string" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} + }`)) + if err != nil { + panic(err) + } + Draft4.meta = c.MustCompile(url) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/draft6.go b/vendor/github.com/santhosh-tekuri/jsonschema/draft6.go new file mode 100644 index 00000000000..310245d1974 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/draft6.go @@ -0,0 +1,170 @@ +// Copyright 2017 Santhosh Kumar Tekuri. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonschema + +import "strings" + +// Draft6 respresents http://json-schema.org/specification-links.html#draft-6 +var Draft6 = &Draft{id: "$id", version: 6} + +func init() { + c := NewCompiler() + url := "http://json-schema.org/draft-06/schema" + err := c.AddResource(url, strings.NewReader(`{ + "$schema": "http://json-schema.org/draft-06/schema#", + "$id": "http://json-schema.org/draft-06/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + 
"default": {} + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "regexProperties": true, + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": {}, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string", "format": "format" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": {} + }`)) + if err != nil { + panic(err) + } + Draft6.meta = c.MustCompile(url) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/draft7.go b/vendor/github.com/santhosh-tekuri/jsonschema/draft7.go new file mode 100644 index 00000000000..68c88f0a08f --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/draft7.go @@ -0,0 +1,196 @@ +// Copyright 2017 Santhosh Kumar Tekuri. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonschema + +import "strings" + +// Draft7 respresents http://json-schema.org/specification-links.html#draft-7 +var Draft7 = &Draft{id: "$id", version: 7} + +func init() { + c := NewCompiler() + url := "http://json-schema.org/draft-07/schema" + err := c.AddResource(url, strings.NewReader(`{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://json-schema.org/draft-07/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$comment": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "readOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", 
+ "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": true + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": true, + "enum": { + "type": "array", + "items": true, + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { + "type": "string", + "format": "format" + }, + "contentMediaType": { + "type": "string", + "format": "mediatype" + }, + "contentEncoding": { + "type": "string", + "format": "encoding" + }, + "if": {"$ref": "#"}, + "then": {"$ref": "#"}, + "else": {"$ref": "#"}, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": true + }`)) + if err != nil 
{ + panic(err) + } + Draft7.meta = c.MustCompile(url) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/errors.go b/vendor/github.com/santhosh-tekuri/jsonschema/errors.go new file mode 100644 index 00000000000..4bb61a925a5 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/errors.go @@ -0,0 +1,122 @@ +// Copyright 2017 Santhosh Kumar Tekuri. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonschema + +import ( + "fmt" + "strings" +) + +// InvalidJSONTypeError is the error type returned by ValidateInteface. +// this tells that specified go object is not valid jsonType. +type InvalidJSONTypeError string + +func (e InvalidJSONTypeError) Error() string { + return fmt.Sprintf("invalid jsonType: %s", string(e)) +} + +// SchemaError is the error type returned by Compile. +type SchemaError struct { + // SchemaURL is the url to json-schema that filed to compile. + // This is helpful, if your schema refers to external schemas + SchemaURL string + + // Err is the error that occurred during compilation. + // It could be ValidationError, because compilation validates + // given schema against the json meta-schema + Err error +} + +func (se *SchemaError) Error() string { + return fmt.Sprintf("json-schema %q compilation failed. Reason:\n%s", se.SchemaURL, se.Err) +} + +// ValidationError is the error type returned by Validate. +type ValidationError struct { + // Message describes error + Message string + + // InstancePtr is json-pointer which refers to json-fragment in json instance + // that is not valid + InstancePtr string + + // SchemaURL is the url to json-schema against which validation failed. 
// This is helpful, if your schema refers to external schemas
a/vendor/github.com/santhosh-tekuri/jsonschema/formats/formats.go b/vendor/github.com/santhosh-tekuri/jsonschema/formats/formats.go new file mode 100644 index 00000000000..03efa2bc46f --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/formats/formats.go @@ -0,0 +1,295 @@ +// Copyright 2017 Santhosh Kumar Tekuri. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package formats provides functions to check string against format. +// +// It allows developers to register custom formats, that can be used +// in json-schema for validation. +package formats + +import ( + "net" + "net/mail" + "net/url" + "regexp" + "strconv" + "strings" + "time" +) + +// The Format type is a function, to check +// whether given string is in valid format. +type Format func(string) bool + +var formats = map[string]Format{ + "date-time": IsDateTime, + "date": IsDate, + "time": IsTime, + "hostname": IsHostname, + "email": IsEmail, + "ip-address": IsIPV4, + "ipv4": IsIPV4, + "ipv6": IsIPV6, + "uri": IsURI, + "iri": IsURI, + "uri-reference": IsURIReference, + "uriref": IsURIReference, + "iri-reference": IsURIReference, + "uri-template": IsURITemplate, + "regex": IsRegex, + "json-pointer": IsJSONPointer, + "relative-json-pointer": IsRelativeJSONPointer, +} + +func init() { + formats["format"] = IsFormat +} + +// Register registers Format object for given format name. +func Register(name string, f Format) { + formats[name] = f +} + +// Get returns Format object for given format name, if found. +func Get(name string) (Format, bool) { + f, ok := formats[name] + return f, ok +} + +// IsFormat tells whether given string is a valid format that is registered. +func IsFormat(s string) bool { + _, ok := formats[s] + return ok +} + +// IsDateTime tells whether given string is a valid date representation +// as defined by RFC 3339, section 5.6. 
+// +// Note: this is unable to parse UTC leap seconds. See https://github.com/golang/go/issues/8728. +func IsDateTime(s string) bool { + if _, err := time.Parse(time.RFC3339, s); err == nil { + return true + } + if _, err := time.Parse(time.RFC3339Nano, s); err == nil { + return true + } + return false +} + +// IsDate tells whether given string is a valid full-date production +// as defined by RFC 3339, section 5.6. +func IsDate(s string) bool { + _, err := time.Parse("2006-01-02", s) + return err == nil +} + +// IsTime tells whether given string is a valid full-time production +// as defined by RFC 3339, section 5.6. +func IsTime(s string) bool { + if _, err := time.Parse("15:04:05Z07:00", s); err == nil { + return true + } + if _, err := time.Parse("15:04:05.999999999Z07:00", s); err == nil { + return true + } + return false +} + +// IsHostname tells whether given string is a valid representation +// for an Internet host name, as defined by RFC 1034, section 3.1. +// +// See https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names, for details. 
+func IsHostname(s string) bool { + // entire hostname (including the delimiting dots but not a trailing dot) has a maximum of 253 ASCII characters + s = strings.TrimSuffix(s, ".") + if len(s) > 253 { + return false + } + + // Hostnames are composed of series of labels concatenated with dots, as are all domain names + for _, label := range strings.Split(s, ".") { + // Each label must be from 1 to 63 characters long + if labelLen := len(label); labelLen < 1 || labelLen > 63 { + return false + } + + // labels could not start with a digit or with a hyphen + if first := s[0]; (first >= '0' && first <= '9') || (first == '-') { + return false + } + + // must not end with a hyphen + if label[len(label)-1] == '-' { + return false + } + + // labels may contain only the ASCII letters 'a' through 'z' (in a case-insensitive manner), + // the digits '0' through '9', and the hyphen ('-') + for _, c := range label { + if valid := (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || (c == '-'); !valid { + return false + } + } + } + + return true +} + +// IsEmail tells whether given string is a valid Internet email address +// as defined by RFC 5322, section 3.4.1. +// +// See https://en.wikipedia.org/wiki/Email_address, for details. 
+func IsEmail(s string) bool { + // entire email address to be no more than 254 characters long + if len(s) > 254 { + return false + } + + // email address is generally recognized as having two parts joined with an at-sign + at := strings.LastIndexByte(s, '@') + if at == -1 { + return false + } + local := s[0:at] + domain := s[at+1:] + + // local part may be up to 64 characters long + if len(local) > 64 { + return false + } + + // domain must match the requirements for a hostname + if !IsHostname(domain) { + return false + } + + _, err := mail.ParseAddress(s) + return err == nil +} + +// IsIPV4 tells whether given string is a valid representation of an IPv4 address +// according to the "dotted-quad" ABNF syntax as defined in RFC 2673, section 3.2. +func IsIPV4(s string) bool { + groups := strings.Split(s, ".") + if len(groups) != 4 { + return false + } + for _, group := range groups { + n, err := strconv.Atoi(group) + if err != nil { + return false + } + if n < 0 || n > 255 { + return false + } + } + return true +} + +// IsIPV6 tells whether given string is a valid representation of an IPv6 address +// as defined in RFC 2373, section 2.2. +func IsIPV6(s string) bool { + if !strings.Contains(s, ":") { + return false + } + return net.ParseIP(s) != nil +} + +// IsURI tells whether given string is valid URI, according to RFC 3986. +func IsURI(s string) bool { + u, err := url.Parse(s) + return err == nil && u.IsAbs() +} + +// IsURIReference tells whether given string is a valid URI Reference +// (either a URI or a relative-reference), according to RFC 3986. +func IsURIReference(s string) bool { + _, err := url.Parse(s) + return err == nil +} + +// IsURITemplate tells whether given string is a valid URI Template +// according to RFC6570. +// +// Current implementation does minimal validation. 
+func IsURITemplate(s string) bool { + u, err := url.Parse(s) + if err != nil { + return false + } + for _, item := range strings.Split(u.RawPath, "/") { + depth := 0 + for _, ch := range item { + switch ch { + case '{': + depth++ + if depth != 1 { + return false + } + case '}': + depth-- + if depth != 0 { + return false + } + } + } + if depth != 0 { + return false + } + } + return true +} + +// IsRegex tells whether given string is a valid regular expression, +// according to the ECMA 262 regular expression dialect. +// +// The implementation uses go-lang regexp package. +func IsRegex(s string) bool { + _, err := regexp.Compile(s) + return err == nil +} + +// IsJSONPointer tells whether given string is a valid JSON Pointer. +// +// Note: It returns false for JSON Pointer URI fragments. +func IsJSONPointer(s string) bool { + if s != "" && !strings.HasPrefix(s, "/") { + return false + } + for _, item := range strings.Split(s, "/") { + for i := 0; i < len(item); i++ { + if item[i] == '~' { + if i == len(item)-1 { + return false + } + switch item[i+1] { + case '~', '0', '1': + // valid + default: + return false + } + } + } + } + return true +} + +// IsRelativeJSONPointer tells whether given string is a valid Relative JSON Pointer. 
+// +// see https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3 +func IsRelativeJSONPointer(s string) bool { + if s == "" { + return false + } + if s[0] == '0' { + s = s[1:] + } else if s[0] >= '0' && s[0] <= '9' { + for s != "" && s[0] >= '0' && s[0] <= '9' { + s = s[1:] + } + } else { + return false + } + return s == "#" || IsJSONPointer(s) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/go.mod b/vendor/github.com/santhosh-tekuri/jsonschema/go.mod new file mode 100644 index 00000000000..89a74866100 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/go.mod @@ -0,0 +1 @@ +module github.com/santhosh-tekuri/jsonschema diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/go.test.sh b/vendor/github.com/santhosh-tekuri/jsonschema/go.test.sh new file mode 100644 index 00000000000..88c4e8b6e7d --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -v -race -coverprofile=profile.out -covermode=atomic $d + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/loader/loader.go b/vendor/github.com/santhosh-tekuri/jsonschema/loader/loader.go new file mode 100644 index 00000000000..6ae19fb13b9 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/loader/loader.go @@ -0,0 +1,105 @@ +// Copyright 2017 Santhosh Kumar Tekuri. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package loader abstracts the reading document at given url. +// +// It allows developers to register loaders for different uri +// schemes. 
+package loader + +import ( + "fmt" + "io" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" + "sync" +) + +// Loader is the interface that wraps the basic Load method. +// +// Load loads the document at given url and returns []byte, +// if successful. +type Loader interface { + Load(url string) (io.ReadCloser, error) +} + +type filePathLoader struct{} + +func (filePathLoader) Load(path string) (io.ReadCloser, error) { + return os.Open(path) +} + +type fileURLLoader struct{} + +func (fileURLLoader) Load(s string) (io.ReadCloser, error) { + u, err := url.Parse(s) + if err != nil { + return nil, err + } + f := u.Path + if runtime.GOOS == "windows" { + f = strings.TrimPrefix(f, "/") + f = filepath.FromSlash(f) + } + return os.Open(f) +} + +var registry = make(map[string]Loader) +var mutex = sync.RWMutex{} + +// SchemeNotRegisteredError is the error type returned by Load function. +// It tells that no Loader is registered for that URL Scheme. +type SchemeNotRegisteredError string + +func (s SchemeNotRegisteredError) Error() string { + return fmt.Sprintf("no Loader registered for scheme %s", string(s)) +} + +// Register registers given Loader for given URI Scheme. +func Register(scheme string, loader Loader) { + mutex.Lock() + defer mutex.Unlock() + registry[scheme] = loader +} + +// UnRegister unregisters the registered loader(if any) for given URI Scheme. +func UnRegister(scheme string) { + mutex.Lock() + defer mutex.Unlock() + delete(registry, scheme) +} + +func get(s string) (Loader, error) { + mutex.RLock() + defer mutex.RUnlock() + u, err := url.Parse(s) + if err != nil { + return nil, err + } + if loader, ok := registry[u.Scheme]; ok { + return loader, nil + } + return nil, SchemeNotRegisteredError(u.Scheme) +} + +// Load loads the document at given url and returns []byte, +// if successful. 
+// +// If no Loader is registered against the URI Scheme, then it +// returns *SchemeNotRegisteredError +var Load = func(url string) (io.ReadCloser, error) { + loader, err := get(url) + if err != nil { + return nil, err + } + return loader.Load(url) +} + +func init() { + Register("", filePathLoader{}) + Register("file", fileURLLoader{}) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/mediatypes/mediatypes.go b/vendor/github.com/santhosh-tekuri/jsonschema/mediatypes/mediatypes.go new file mode 100644 index 00000000000..3c4ec3f53bf --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/mediatypes/mediatypes.go @@ -0,0 +1,39 @@ +// Copyright 2017 Santhosh Kumar Tekuri. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mediatypes provides functions to validate data against mediatype. +// +// It allows developers to register custom mediatypes, that can be used +// in json-schema for validation. +package mediatypes + +import ( + "bytes" + "encoding/json" +) + +// The MediaType type is a function, that validates +// whether the bytes represent data of given mediaType. +type MediaType func([]byte) error + +var mediaTypes = map[string]MediaType{ + "application/json": validateJSON, +} + +// Register registers MediaType object for given mediaType. +func Register(name string, mt MediaType) { + mediaTypes[name] = mt +} + +// Get returns MediaType object for given mediaType, if found. 
+func Get(name string) (MediaType, bool) { + mt, ok := mediaTypes[name] + return mt, ok +} + +func validateJSON(b []byte) error { + decoder := json.NewDecoder(bytes.NewReader(b)) + var v interface{} + return decoder.Decode(&v) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/resource.go b/vendor/github.com/santhosh-tekuri/jsonschema/resource.go new file mode 100644 index 00000000000..9f52cf3acd9 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/resource.go @@ -0,0 +1,236 @@ +// Copyright 2017 Santhosh Kumar Tekuri. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonschema + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/url" + "path/filepath" + "strconv" + "strings" +) + +type resource struct { + url string + doc interface{} + draft *Draft + schemas map[string]*Schema +} + +// DecodeJSON decodes json document from r. +// +// Note that number is decoded into json.Number instead of as a float64 +func DecodeJSON(r io.Reader) (interface{}, error) { + decoder := json.NewDecoder(r) + decoder.UseNumber() + var doc interface{} + if err := decoder.Decode(&doc); err != nil { + return nil, err + } + if t, _ := decoder.Token(); t != nil { + return nil, fmt.Errorf("invalid character %v after top-level value", t) + } + return doc, nil +} + +func newResource(base string, r io.Reader) (*resource, error) { + if strings.IndexByte(base, '#') != -1 { + panic(fmt.Sprintf("BUG: newResource(%q)", base)) + } + doc, err := DecodeJSON(r) + if err != nil { + return nil, fmt.Errorf("parsing %q failed. 
Reason: %v", base, err) + } + return &resource{ + url: base, + doc: doc, + schemas: make(map[string]*Schema)}, nil +} + +func resolveURL(base, ref string) (string, error) { + if ref == "" { + return base, nil + } + + refURL, err := url.Parse(ref) + if err != nil { + return "", err + } + if refURL.IsAbs() { + return normalize(ref), nil + } + + baseURL, err := url.Parse(base) + if err != nil { + return "", err + } + if baseURL.IsAbs() { + return normalize(baseURL.ResolveReference(refURL).String()), nil + } + + // filepath resolving + base, _ = split(base) + ref, fragment := split(ref) + if ref == "" { + return base + fragment, nil + } + dir, _ := filepath.Split(base) + return filepath.Join(dir, ref) + fragment, nil +} + +func (r *resource) resolvePtr(ptr string) (string, interface{}, error) { + if !strings.HasPrefix(ptr, "#/") { + panic(fmt.Sprintf("BUG: resolvePtr(%q)", ptr)) + } + base := r.url + p := strings.TrimPrefix(ptr, "#/") + doc := r.doc + for _, item := range strings.Split(p, "/") { + item = strings.Replace(item, "~1", "/", -1) + item = strings.Replace(item, "~0", "~", -1) + item, err := url.PathUnescape(item) + if err != nil { + return "", nil, errors.New("unable to url unscape: " + item) + } + switch d := doc.(type) { + case map[string]interface{}: + if id, ok := d[r.draft.id]; ok { + if id, ok := id.(string); ok { + if base, err = resolveURL(base, id); err != nil { + return "", nil, err + } + } + } + doc = d[item] + case []interface{}: + index, err := strconv.Atoi(item) + if err != nil { + return "", nil, fmt.Errorf("invalid $ref %q, reason: %s", ptr, err) + } + if index < 0 || index >= len(d) { + return "", nil, fmt.Errorf("invalid $ref %q, reason: array index outofrange", ptr) + } + doc = d[index] + default: + return "", nil, errors.New("invalid $ref " + ptr) + } + } + return base, doc, nil +} + +func split(uri string) (string, string) { + hash := strings.IndexByte(uri, '#') + if hash == -1 { + return uri, "#" + } + return uri[0:hash], uri[hash:] +} + 
+func normalize(url string) string { + base, fragment := split(url) + if rootFragment(fragment) { + fragment = "#" + } + return base + fragment +} + +func rootFragment(fragment string) bool { + return fragment == "" || fragment == "#" || fragment == "#/" +} + +func resolveIDs(draft *Draft, base string, v interface{}, ids map[string]map[string]interface{}) error { + m, ok := v.(map[string]interface{}) + if !ok { + return nil + } + if id, ok := m[draft.id]; ok { + b, err := resolveURL(base, id.(string)) + if err != nil { + return err + } + base = b + ids[base] = m + } + + for _, pname := range []string{"not", "additionalProperties"} { + if m, ok := m[pname]; ok { + if err := resolveIDs(draft, base, m, ids); err != nil { + return err + } + } + } + + for _, pname := range []string{"allOf", "anyOf", "oneOf"} { + if arr, ok := m[pname]; ok { + for _, m := range arr.([]interface{}) { + if err := resolveIDs(draft, base, m, ids); err != nil { + return err + } + } + } + } + + for _, pname := range []string{"definitions", "properties", "patternProperties", "dependencies"} { + if props, ok := m[pname]; ok { + for _, m := range props.(map[string]interface{}) { + if err := resolveIDs(draft, base, m, ids); err != nil { + return err + } + } + } + } + + if items, ok := m["items"]; ok { + switch items := items.(type) { + case map[string]interface{}: + if err := resolveIDs(draft, base, items, ids); err != nil { + return err + } + case []interface{}: + for _, item := range items { + if err := resolveIDs(draft, base, item, ids); err != nil { + return err + } + } + } + if additionalItems, ok := m["additionalItems"]; ok { + if additionalItems, ok := additionalItems.(map[string]interface{}); ok { + if err := resolveIDs(draft, base, additionalItems, ids); err != nil { + return err + } + } + } + } + + if draft.version >= 6 { + for _, pname := range []string{"propertyNames", "contains"} { + if m, ok := m[pname]; ok { + if err := resolveIDs(draft, base, m, ids); err != nil { + return err + } 
+ } + } + } + + if draft.version >= 7 { + if iff, ok := m["if"]; ok { + if err := resolveIDs(draft, base, iff, ids); err != nil { + return err + } + for _, pname := range []string{"then", "else"} { + if m, ok := m[pname]; ok { + if err := resolveIDs(draft, base, m, ids); err != nil { + return err + } + } + } + } + } + + return nil +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/schema.go b/vendor/github.com/santhosh-tekuri/jsonschema/schema.go new file mode 100644 index 00000000000..8ee1638547f --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/schema.go @@ -0,0 +1,558 @@ +// Copyright 2017 Santhosh Kumar Tekuri. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonschema + +import ( + "encoding/json" + "fmt" + "io" + "math/big" + "net/url" + "regexp" + "strconv" + "strings" + "unicode/utf8" + + "github.com/santhosh-tekuri/jsonschema/decoders" + "github.com/santhosh-tekuri/jsonschema/formats" + "github.com/santhosh-tekuri/jsonschema/mediatypes" +) + +// A Schema represents compiled version of json-schema. +type Schema struct { + URL string // absolute url of the resource. + Ptr string // json-pointer to schema. always starts with `#`. + + // type agnostic validations + Always *bool // always pass/fail. used when booleans are used as schemas in draft-07. + Ref *Schema // reference to actual schema. if not nil, all the remaining fields are ignored. + Types []string // allowed types. + Constant []interface{} // first element in slice is constant value. note: slice is used to capture nil constant. + Enum []interface{} // allowed values. + enumError string // error message for enum fail. captured here to avoid constructing error message every time. + Not *Schema + AllOf []*Schema + AnyOf []*Schema + OneOf []*Schema + If *Schema + Then *Schema // nil, when If is nil. + Else *Schema // nil, when If is nil. 
+ + // object validations + MinProperties int // -1 if not specified. + MaxProperties int // -1 if not specified. + Required []string // list of required properties. + Properties map[string]*Schema + PropertyNames *Schema + RegexProperties bool // property names must be valid regex. used only in draft4 as workaround in metaschema. + PatternProperties map[*regexp.Regexp]*Schema + AdditionalProperties interface{} // nil or false or *Schema. + Dependencies map[string]interface{} // value is *Schema or []string. + + // array validations + MinItems int // -1 if not specified. + MaxItems int // -1 if not specified. + UniqueItems bool + Items interface{} // nil or *Schema or []*Schema + AdditionalItems interface{} // nil or bool or *Schema. + Contains *Schema + + // string validations + MinLength int // -1 if not specified. + MaxLength int // -1 if not specified. + Pattern *regexp.Regexp + Format formats.Format + FormatName string + ContentEncoding string + Decoder decoders.Decoder + ContentMediaType string + MediaType mediatypes.MediaType + + // number validators + Minimum *big.Float + ExclusiveMinimum *big.Float + Maximum *big.Float + ExclusiveMaximum *big.Float + MultipleOf *big.Float + + // annotations. captured only when Compiler.ExtractAnnotations is true. + Title string + Description string + Default interface{} + ReadOnly bool + WriteOnly bool + Examples []interface{} +} + +// Compile parses json-schema at given url returns, if successful, +// a Schema object that can be used to match against json. +// +// The json-schema is validated with draft4 specification. +// Returned error can be *SchemaError +func Compile(url string) (*Schema, error) { + return NewCompiler().Compile(url) +} + +// MustCompile is like Compile but panics if the url cannot be compiled to *Schema. +// It simplifies safe initialization of global variables holding compiled Schemas. 
+func MustCompile(url string) *Schema { + return NewCompiler().MustCompile(url) +} + +// Validate validates the given json data, against the json-schema. +// +// Returned error can be *ValidationError. +func (s *Schema) Validate(r io.Reader) error { + doc, err := DecodeJSON(r) + if err != nil { + return err + } + return s.ValidateInterface(doc) +} + +// ValidateInterface validates given doc, against the json-schema. +// +// the doc must be the value decoded by json package using interface{} type. +// we recommend to use jsonschema.DecodeJSON(io.Reader) to decode JSON. +func (s *Schema) ValidateInterface(doc interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(InvalidJSONTypeError); ok { + err = r.(InvalidJSONTypeError) + } else { + panic(r) + } + } + }() + if err := s.validate(doc); err != nil { + finishSchemaContext(err, s) + finishInstanceContext(err) + return &ValidationError{ + Message: fmt.Sprintf("doesn't validate with %q", s.URL+s.Ptr), + InstancePtr: "#", + SchemaURL: s.URL, + SchemaPtr: s.Ptr, + Causes: []*ValidationError{err.(*ValidationError)}, + } + } + return nil +} + +// validate validates given value v with this schema. 
+func (s *Schema) validate(v interface{}) error { + if s.Always != nil { + if !*s.Always { + return validationError("", "always fail") + } + return nil + } + + if s.Ref != nil { + if err := s.Ref.validate(v); err != nil { + finishSchemaContext(err, s.Ref) + var refURL string + if s.URL == s.Ref.URL { + refURL = s.Ref.Ptr + } else { + refURL = s.Ref.URL + s.Ref.Ptr + } + return validationError("$ref", "doesn't validate with %q", refURL).add(err) + } + + // All other properties in a "$ref" object MUST be ignored + return nil + } + + if len(s.Types) > 0 { + vType := jsonType(v) + matched := false + for _, t := range s.Types { + if vType == t { + matched = true + break + } else if t == "integer" && vType == "number" { + if _, ok := new(big.Int).SetString(fmt.Sprint(v), 10); ok { + matched = true + break + } + } + } + if !matched { + return validationError("type", "expected %s, but got %s", strings.Join(s.Types, " or "), vType) + } + } + + if len(s.Constant) > 0 { + if !equals(v, s.Constant[0]) { + switch jsonType(s.Constant[0]) { + case "object", "array": + return validationError("const", "const failed") + default: + return validationError("const", "value must be %#v", s.Constant[0]) + } + } + } + + if len(s.Enum) > 0 { + matched := false + for _, item := range s.Enum { + if equals(v, item) { + matched = true + break + } + } + if !matched { + return validationError("enum", s.enumError) + } + } + + if s.Not != nil && s.Not.validate(v) == nil { + return validationError("not", "not failed") + } + + for i, sch := range s.AllOf { + if err := sch.validate(v); err != nil { + return validationError("allOf/"+strconv.Itoa(i), "allOf failed").add(err) + } + } + + if len(s.AnyOf) > 0 { + matched := false + var causes []error + for i, sch := range s.AnyOf { + if err := sch.validate(v); err == nil { + matched = true + break + } else { + causes = append(causes, addContext("", strconv.Itoa(i), err)) + } + } + if !matched { + return validationError("anyOf", "anyOf 
failed").add(causes...) + } + } + + if len(s.OneOf) > 0 { + matched := -1 + var causes []error + for i, sch := range s.OneOf { + if err := sch.validate(v); err == nil { + if matched == -1 { + matched = i + } else { + return validationError("oneOf", "valid against schemas at indexes %d and %d", matched, i) + } + } else { + causes = append(causes, addContext("", strconv.Itoa(i), err)) + } + } + if matched == -1 { + return validationError("oneOf", "oneOf failed").add(causes...) + } + } + + if s.If != nil { + if s.If.validate(v) == nil { + if s.Then != nil { + if err := s.Then.validate(v); err != nil { + return validationError("then", "if-then failed").add(err) + } + } + } else { + if s.Else != nil { + if err := s.Else.validate(v); err != nil { + return validationError("else", "if-else failed").add(err) + } + } + } + } + + switch v := v.(type) { + case map[string]interface{}: + if s.MinProperties != -1 && len(v) < s.MinProperties { + return validationError("minProperties", "minimum %d properties allowed, but found %d properties", s.MinProperties, len(v)) + } + if s.MaxProperties != -1 && len(v) > s.MaxProperties { + return validationError("maxProperties", "maximum %d properties allowed, but found %d properties", s.MaxProperties, len(v)) + } + if len(s.Required) > 0 { + var missing []string + for _, pname := range s.Required { + if _, ok := v[pname]; !ok { + missing = append(missing, strconv.Quote(pname)) + } + } + if len(missing) > 0 { + return validationError("required", "missing properties: %s", strings.Join(missing, ", ")) + } + } + + var additionalProps map[string]struct{} + if s.AdditionalProperties != nil { + additionalProps = make(map[string]struct{}, len(v)) + for pname := range v { + additionalProps[pname] = struct{}{} + } + } + + if len(s.Properties) > 0 { + for pname, pschema := range s.Properties { + if pvalue, ok := v[pname]; ok { + delete(additionalProps, pname) + if err := pschema.validate(pvalue); err != nil { + return addContext(escape(pname), 
"properties/"+escape(pname), err) + } + } + } + } + + if s.PropertyNames != nil { + for pname := range v { + if err := s.PropertyNames.validate(pname); err != nil { + return addContext(escape(pname), "propertyNames", err) + } + } + } + + if s.RegexProperties { + for pname := range v { + if !formats.IsRegex(pname) { + return validationError("", "patternProperty %q is not valid regex", pname) + } + } + } + for pattern, pschema := range s.PatternProperties { + for pname, pvalue := range v { + if pattern.MatchString(pname) { + delete(additionalProps, pname) + if err := pschema.validate(pvalue); err != nil { + return addContext(escape(pname), "patternProperties/"+escape(pattern.String()), err) + } + } + } + } + if s.AdditionalProperties != nil { + if _, ok := s.AdditionalProperties.(bool); ok { + if len(additionalProps) != 0 { + pnames := make([]string, 0, len(additionalProps)) + for pname := range additionalProps { + pnames = append(pnames, strconv.Quote(pname)) + } + return validationError("additionalProperties", "additionalProperties %s not allowed", strings.Join(pnames, ", ")) + } + } else { + schema := s.AdditionalProperties.(*Schema) + for pname := range additionalProps { + if pvalue, ok := v[pname]; ok { + if err := schema.validate(pvalue); err != nil { + return addContext(escape(pname), "additionalProperties", err) + } + } + } + } + } + for dname, dvalue := range s.Dependencies { + if _, ok := v[dname]; ok { + switch dvalue := dvalue.(type) { + case *Schema: + if err := dvalue.validate(v); err != nil { + return addContext("", "dependencies/"+escape(dname), err) + } + case []string: + for i, pname := range dvalue { + if _, ok := v[pname]; !ok { + return validationError("dependencies/"+escape(dname)+"/"+strconv.Itoa(i), "property %q is required, if %q property exists", pname, dname) + } + } + } + } + } + + case []interface{}: + if s.MinItems != -1 && len(v) < s.MinItems { + return validationError("minItems", "minimum %d items allowed, but found %d items", 
s.MinItems, len(v)) + } + if s.MaxItems != -1 && len(v) > s.MaxItems { + return validationError("maxItems", "maximum %d items allowed, but found %d items", s.MaxItems, len(v)) + } + if s.UniqueItems { + for i := 1; i < len(v); i++ { + for j := 0; j < i; j++ { + if equals(v[i], v[j]) { + return validationError("uniqueItems", "items at index %d and %d are equal", j, i) + } + } + } + } + switch items := s.Items.(type) { + case *Schema: + for i, item := range v { + if err := items.validate(item); err != nil { + return addContext(strconv.Itoa(i), "items", err) + } + } + case []*Schema: + if additionalItems, ok := s.AdditionalItems.(bool); ok { + if !additionalItems && len(v) > len(items) { + return validationError("additionalItems", "only %d items are allowed, but found %d items", len(items), len(v)) + } + } + for i, item := range v { + if i < len(items) { + if err := items[i].validate(item); err != nil { + return addContext(strconv.Itoa(i), "items/"+strconv.Itoa(i), err) + } + } else if sch, ok := s.AdditionalItems.(*Schema); ok { + if err := sch.validate(item); err != nil { + return addContext(strconv.Itoa(i), "additionalItems", err) + } + } else { + break + } + } + } + if s.Contains != nil { + matched := false + var causes []error + for i, item := range v { + if err := s.Contains.validate(item); err != nil { + causes = append(causes, addContext(strconv.Itoa(i), "", err)) + } else { + matched = true + break + } + } + if !matched { + return validationError("contains", "contains failed").add(causes...) 
+ } + } + + case string: + if s.MinLength != -1 || s.MaxLength != -1 { + length := utf8.RuneCount([]byte(v)) + if s.MinLength != -1 && length < s.MinLength { + return validationError("minLength", "length must be >= %d, but got %d", s.MinLength, length) + } + if s.MaxLength != -1 && length > s.MaxLength { + return validationError("maxLength", "length must be <= %d, but got %d", s.MaxLength, length) + } + } + if s.Pattern != nil && !s.Pattern.MatchString(v) { + return validationError("pattern", "does not match pattern %q", s.Pattern) + } + if s.Format != nil && !s.Format(v) { + return validationError("format", "%q is not valid %q", v, s.FormatName) + } + + var content []byte + if s.Decoder != nil { + b, err := s.Decoder(v) + if err != nil { + return validationError("contentEncoding", "%q is not %s encoded", v, s.ContentEncoding) + } + content = b + } + if s.MediaType != nil { + if s.Decoder == nil { + content = []byte(v) + } + if err := s.MediaType(content); err != nil { + return validationError("contentMediaType", "value is not of mediatype %q", s.ContentMediaType) + } + } + + case json.Number, float64, int, int32, int64: + num, _ := new(big.Float).SetString(fmt.Sprint(v)) + if s.Minimum != nil && num.Cmp(s.Minimum) < 0 { + return validationError("minimum", "must be >= %v but found %v", s.Minimum, v) + } + if s.ExclusiveMinimum != nil && num.Cmp(s.ExclusiveMinimum) <= 0 { + return validationError("exclusiveMinimum", "must be > %v but found %v", s.ExclusiveMinimum, v) + } + if s.Maximum != nil && num.Cmp(s.Maximum) > 0 { + return validationError("maximum", "must be <= %v but found %v", s.Maximum, v) + } + if s.ExclusiveMaximum != nil && num.Cmp(s.ExclusiveMaximum) >= 0 { + return validationError("exclusiveMaximum", "must be < %v but found %v", s.ExclusiveMaximum, v) + } + if s.MultipleOf != nil { + if q := new(big.Float).Quo(num, s.MultipleOf); !q.IsInt() { + return validationError("multipleOf", "%v not multipleOf %v", v, s.MultipleOf) + } + } + } + + return nil +} + 
+// jsonType returns the json type of given value v. +// +// It panics if the given value is not valid json value +func jsonType(v interface{}) string { + switch v.(type) { + case nil: + return "null" + case bool: + return "boolean" + case json.Number, float64, int, int32, int64: + return "number" + case string: + return "string" + case []interface{}: + return "array" + case map[string]interface{}: + return "object" + } + panic(InvalidJSONTypeError(fmt.Sprintf("%T", v))) +} + +// equals tells if given two json values are equal or not. +func equals(v1, v2 interface{}) bool { + v1Type := jsonType(v1) + if v1Type != jsonType(v2) { + return false + } + switch v1Type { + case "array": + arr1, arr2 := v1.([]interface{}), v2.([]interface{}) + if len(arr1) != len(arr2) { + return false + } + for i := range arr1 { + if !equals(arr1[i], arr2[i]) { + return false + } + } + return true + case "object": + obj1, obj2 := v1.(map[string]interface{}), v2.(map[string]interface{}) + if len(obj1) != len(obj2) { + return false + } + for k, v1 := range obj1 { + if v2, ok := obj2[k]; ok { + if !equals(v1, v2) { + return false + } + } else { + return false + } + } + return true + case "number": + num1, _ := new(big.Float).SetString(string(v1.(json.Number))) + num2, _ := new(big.Float).SetString(string(v2.(json.Number))) + return num1.Cmp(num2) == 0 + default: + return v1 == v2 + } +} + +// escape converts given token to valid json-pointer token +func escape(token string) string { + token = strings.Replace(token, "~", "~0", -1) + token = strings.Replace(token, "/", "~1", -1) + return url.PathEscape(token) +} diff --git a/vendor/github.com/stretchr/testify/suite/doc.go b/vendor/github.com/stretchr/testify/suite/doc.go new file mode 100644 index 00000000000..f91a245d3f8 --- /dev/null +++ b/vendor/github.com/stretchr/testify/suite/doc.go @@ -0,0 +1,65 @@ +// Package suite contains logic for creating testing suite structs +// and running the methods on those structs as 
tests. The most useful +// piece of this package is that you can create setup/teardown methods +// on your testing suites, which will run before/after the whole suite +// or individual tests (depending on which interface(s) you +// implement). +// +// A testing suite is usually built by first extending the built-in +// suite functionality from suite.Suite in testify. Alternatively, +// you could reproduce that logic on your own if you wanted (you +// just need to implement the TestingSuite interface from +// suite/interfaces.go). +// +// After that, you can implement any of the interfaces in +// suite/interfaces.go to add setup/teardown functionality to your +// suite, and add any methods that start with "Test" to add tests. +// Methods that do not match any suite interfaces and do not begin +// with "Test" will not be run by testify, and can safely be used as +// helper methods. +// +// Once you've built your testing suite, you need to run the suite +// (using suite.Run from testify) inside any function that matches the +// identity that "go test" is already looking for (i.e. +// func(*testing.T)). +// +// Regular expression to select test suites specified command-line +// argument "-run". Regular expression to select the methods +// of test suites specified command-line argument "-m". +// Suite object has assertion methods. 
+// +// A crude example: +// // Basic imports +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/suite" +// ) +// +// // Define the suite, and absorb the built-in basic suite +// // functionality from testify - including a T() method which +// // returns the current testing context +// type ExampleTestSuite struct { +// suite.Suite +// VariableThatShouldStartAtFive int +// } +// +// // Make sure that VariableThatShouldStartAtFive is set to five +// // before each test +// func (suite *ExampleTestSuite) SetupTest() { +// suite.VariableThatShouldStartAtFive = 5 +// } +// +// // All methods that begin with "Test" are run as tests within a +// // suite. +// func (suite *ExampleTestSuite) TestExample() { +// assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive) +// suite.Equal(5, suite.VariableThatShouldStartAtFive) +// } +// +// // In order for 'go test' to run this suite, we need to create +// // a normal test function and pass our suite to suite.Run +// func TestExampleTestSuite(t *testing.T) { +// suite.Run(t, new(ExampleTestSuite)) +// } +package suite diff --git a/vendor/github.com/stretchr/testify/suite/interfaces.go b/vendor/github.com/stretchr/testify/suite/interfaces.go new file mode 100644 index 00000000000..b37cb040987 --- /dev/null +++ b/vendor/github.com/stretchr/testify/suite/interfaces.go @@ -0,0 +1,46 @@ +package suite + +import "testing" + +// TestingSuite can store and return the current *testing.T context +// generated by 'go test'. +type TestingSuite interface { + T() *testing.T + SetT(*testing.T) +} + +// SetupAllSuite has a SetupSuite method, which will run before the +// tests in the suite are run. +type SetupAllSuite interface { + SetupSuite() +} + +// SetupTestSuite has a SetupTest method, which will run before each +// test in the suite. 
+type SetupTestSuite interface { + SetupTest() +} + +// TearDownAllSuite has a TearDownSuite method, which will run after +// all the tests in the suite have been run. +type TearDownAllSuite interface { + TearDownSuite() +} + +// TearDownTestSuite has a TearDownTest method, which will run after +// each test in the suite. +type TearDownTestSuite interface { + TearDownTest() +} + +// BeforeTest has a function to be executed right before the test +// starts and receives the suite and test names as input +type BeforeTest interface { + BeforeTest(suiteName, testName string) +} + +// AfterTest has a function to be executed right after the test +// finishes and receives the suite and test names as input +type AfterTest interface { + AfterTest(suiteName, testName string) +} diff --git a/vendor/github.com/stretchr/testify/suite/suite.go b/vendor/github.com/stretchr/testify/suite/suite.go new file mode 100644 index 00000000000..d708d7d7539 --- /dev/null +++ b/vendor/github.com/stretchr/testify/suite/suite.go @@ -0,0 +1,166 @@ +package suite + +import ( + "flag" + "fmt" + "os" + "reflect" + "regexp" + "runtime/debug" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var allTestsFilter = func(_, _ string) (bool, error) { return true, nil } +var matchMethod = flag.String("testify.m", "", "regular expression to select tests of the testify suite to run") + +// Suite is a basic testing suite with methods for storing and +// retrieving the current *testing.T context. +type Suite struct { + *assert.Assertions + require *require.Assertions + t *testing.T +} + +// T retrieves the current *testing.T context. +func (suite *Suite) T() *testing.T { + return suite.t +} + +// SetT sets the current *testing.T context. +func (suite *Suite) SetT(t *testing.T) { + suite.t = t + suite.Assertions = assert.New(t) + suite.require = require.New(t) +} + +// Require returns a require context for suite. 
+func (suite *Suite) Require() *require.Assertions { + if suite.require == nil { + suite.require = require.New(suite.T()) + } + return suite.require +} + +// Assert returns an assert context for suite. Normally, you can call +// `suite.NoError(expected, actual)`, but for situations where the embedded +// methods are overridden (for example, you might want to override +// assert.Assertions with require.Assertions), this method is provided so you +// can call `suite.Assert().NoError()`. +func (suite *Suite) Assert() *assert.Assertions { + if suite.Assertions == nil { + suite.Assertions = assert.New(suite.T()) + } + return suite.Assertions +} + +func failOnPanic(t *testing.T) { + r := recover() + if r != nil { + t.Errorf("test panicked: %v\n%s", r, debug.Stack()) + t.FailNow() + } +} + +// Run provides suite functionality around golang subtests. It should be +// called in place of t.Run(name, func(t *testing.T)) in test suite code. +// The passed-in func will be executed as a subtest with a fresh instance of t. +// Provides compatibility with go test pkg -run TestSuite/TestName/SubTestName. +func (suite *Suite) Run(name string, subtest func()) bool { + oldT := suite.T() + defer suite.SetT(oldT) + return oldT.Run(name, func(t *testing.T) { + suite.SetT(t) + subtest() + }) +} + +// Run takes a testing suite and runs all of the tests attached +// to it. 
+func Run(t *testing.T, suite TestingSuite) { + suite.SetT(t) + defer failOnPanic(t) + + suiteSetupDone := false + + methodFinder := reflect.TypeOf(suite) + tests := []testing.InternalTest{} + for index := 0; index < methodFinder.NumMethod(); index++ { + method := methodFinder.Method(index) + ok, err := methodFilter(method.Name) + if err != nil { + fmt.Fprintf(os.Stderr, "testify: invalid regexp for -m: %s\n", err) + os.Exit(1) + } + if !ok { + continue + } + if !suiteSetupDone { + if setupAllSuite, ok := suite.(SetupAllSuite); ok { + setupAllSuite.SetupSuite() + } + defer func() { + if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok { + tearDownAllSuite.TearDownSuite() + } + }() + suiteSetupDone = true + } + test := testing.InternalTest{ + Name: method.Name, + F: func(t *testing.T) { + parentT := suite.T() + suite.SetT(t) + defer failOnPanic(t) + + if setupTestSuite, ok := suite.(SetupTestSuite); ok { + setupTestSuite.SetupTest() + } + if beforeTestSuite, ok := suite.(BeforeTest); ok { + beforeTestSuite.BeforeTest(methodFinder.Elem().Name(), method.Name) + } + defer func() { + if afterTestSuite, ok := suite.(AfterTest); ok { + afterTestSuite.AfterTest(methodFinder.Elem().Name(), method.Name) + } + if tearDownTestSuite, ok := suite.(TearDownTestSuite); ok { + tearDownTestSuite.TearDownTest() + } + suite.SetT(parentT) + }() + method.Func.Call([]reflect.Value{reflect.ValueOf(suite)}) + }, + } + tests = append(tests, test) + } + runTests(t, tests) +} + +func runTests(t testing.TB, tests []testing.InternalTest) { + r, ok := t.(runner) + if !ok { // backwards compatibility with Go 1.6 and below + if !testing.RunTests(allTestsFilter, tests) { + t.Fail() + } + return + } + + for _, test := range tests { + r.Run(test.Name, test.F) + } +} + +// Filtering method according to set regular expression +// specified command-line argument -m +func methodFilter(name string) (bool, error) { + if ok, _ := regexp.MatchString("^Test", name); !ok { + return false, nil + } + return 
regexp.MatchString(*matchMethod, name) +} + +type runner interface { + Run(name string, f func(t *testing.T)) bool +} diff --git a/vendor/go.elastic.co/apm/.dockerignore b/vendor/go.elastic.co/apm/.dockerignore new file mode 100644 index 00000000000..25e074c2513 --- /dev/null +++ b/vendor/go.elastic.co/apm/.dockerignore @@ -0,0 +1,2 @@ +scripts/docker* +scripts/Docker* diff --git a/vendor/go.elastic.co/apm/.gitignore b/vendor/go.elastic.co/apm/.gitignore new file mode 100644 index 00000000000..d5a32627a04 --- /dev/null +++ b/vendor/go.elastic.co/apm/.gitignore @@ -0,0 +1,5 @@ +*.swp +*.test +*.out +docs/html +build diff --git a/vendor/go.elastic.co/apm/.jenkins-edge.yml b/vendor/go.elastic.co/apm/.jenkins-edge.yml new file mode 100644 index 00000000000..3426e876db2 --- /dev/null +++ b/vendor/go.elastic.co/apm/.jenkins-edge.yml @@ -0,0 +1,2 @@ +GO_VERSION: + - "master" diff --git a/vendor/go.elastic.co/apm/.jenkins.yml b/vendor/go.elastic.co/apm/.jenkins.yml new file mode 100644 index 00000000000..e41a8a5e942 --- /dev/null +++ b/vendor/go.elastic.co/apm/.jenkins.yml @@ -0,0 +1,8 @@ +GO_VERSION: + - stable + - "1.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" diff --git a/vendor/go.elastic.co/apm/CHANGELOG.asciidoc b/vendor/go.elastic.co/apm/CHANGELOG.asciidoc new file mode 100644 index 00000000000..f6cd5aa4be5 --- /dev/null +++ b/vendor/go.elastic.co/apm/CHANGELOG.asciidoc @@ -0,0 +1,262 @@ +ifdef::env-github[] +NOTE: Release notes are best read in our documentation at +https://www.elastic.co/guide/en/apm/agent/go/current/release-notes.html[elastic.co] +endif::[] + +//// +[[release-notes-x.x.x]] +==== x.x.x - YYYY/MM/DD + +[float] +===== Breaking changes + +[float] +===== Features +* Cool new feature: {pull}2526[#2526] + +[float] +===== Bug fixes +//// + +[[unreleased]] +=== Unreleased + +https://github.com/elastic/apm-agent-go/compare/v1.7.2...master[View commits] + +[[release-notes-1.x]] +=== Go Agent version 1.x + 
+[[release-notes-1.7.2]] +==== 1.7.2 - 2020/03/19 + +- Update cucumber/godog to 0.8.1 {pull}733[(#733)] + +[[release-notes-1.7.1]] +==== 1.7.1 - 2020/03/05 + +https://github.com/elastic/apm-agent-go/releases/tag/v1.7.1[View release] + +- Fix segfault on 32-bit architectures {pull}728[(#728)] + +[[release-notes-1.7.0]] +==== 1.7.0 - 2020/01/10 + +https://github.com/elastic/apm-agent-go/releases/tag/v1.7.0[View release] + + - Add span.context.destination.* {pull}664[(#664)] + - transport: fix Content-Type for pprof data {pull}679[(#679)] + - Add "tracestate" propagation {pull}690[(#690)] + - Add support for API Key auth {pull}698[(#698)] + - module/apmsql: report rows affected {pull}700[(#700)] + +[[release-notes-1.6.0]] +==== 1.6.0 - 2019/11/17 + +https://github.com/elastic/apm-agent-go/releases/tag/v1.6.0[View release] + + - module/apmhttp: add WithClientRequestName option {pull}609[(#609)] + - module/apmhttp: add WithPanicPropagation function {pull}611[(#611)] + - module/apmgoredis: add Client.RedisClient {pull}613[(#613)] + - Introduce apm.TraceFormatter, for formatting trace IDs {pull}635[(#635)] + - Report error cause(s), add support for errors.Unwrap {pull}638[(#638)] + - Setting `ELASTIC_APM_TRANSACTION_MAX_SPANS` to 0 now disables all spans {pull}640[(#640)] + - module/apmzerolog: add Writer.MinLevel {pull}641[(#641)] + - Introduce SetLabel and deprecate SetTag {pull}642[(#642)] + - Support central config for `ELASTIC_APM_CAPTURE_BODY` and `ELASTIC_APM_TRANSACTION_MAX_SPANS` {pull}648[(#648)] + - module/apmgorm: sql.ErrNoRows is no longer reported as an error {pull}645[(#645)] + - Server URL path is cleaned/canonicalizsed in order to avoid 301 redirects {pull}658[(#658)] + - `context.request.socket.remote_address` now reports the peer address {pull}662[(#662)] + - Experimental support for periodic CPU/heap profiling {pull}666[(#666)] + - module/apmnegroni: introduce tracing Negroni middleware {pull}671[(#671)] + - Unescape hyphens in 
k8s pod UIDs when the systemd cgroup driver is used {pull}672[(#672)] + - Read and propagate the standard W3C "traceparent" header {pull}674[(#674)] + +[[release-notes-1.5.0]] +==== 1.5.0 - 2019/07/31 + +https://github.com/elastic/apm-agent-go/releases/tag/v1.5.0[View release] + + - Add Context.SetCustom {pull}581[(#581)] + - Add support for extracting UUID-like container IDs {pull}577[(#577)] + - Introduce transaction/span breakdown metrics {pull}564[(#564)] + - Optimised HTTP request body capture {pull}592[(#592)] + - Fixed transaction encoding to drop tags (and other context) for non-sampled transactions {pull}593[(#593)] + - Introduce central config polling {pull}591[(#591)] + - Fixed apmgrpc client interceptor, propagating trace context for non-sampled transactions {pull}602[(#602)] + +[[release-notes-1.4.0]] +==== 1.4.0 - 2019/06/20 + +https://github.com/elastic/apm-agent-go/releases/tag/v1.4.0[View release] + + - Update opentracing-go dependency to v1.1.0 + - Update HTTP routers to return " unknown route" if route cannot be matched {pull}486[(#486)] + - module/apmchi: introduce instrumentation for go-chi/chi router {pull}495[(#495)] + - module/apmgoredis: introduce instrumentation for the go-redis/redis client {pull}505[(#505)] + - module/apmsql: exposed the QuerySignature function {pull}515[(#515)] + - module/apmgopg: introduce instrumentation for the go-pg/pg ORM {pull}516[(#516)] + - module/apmmongo: set minimum Go version to Go 1.10 {pull}522[(#522)] + - internal/sqlscanner: bug fix for multi-byte rune handling {pull}535[(#535)] + - module/apmgrpc: added WithServerRequestIgnorer server option {pull}531[(#531)] + - Introduce `ELASTIC_APM_GLOBAL_LABELS` config {pull}539[(#539)] + - module/apmgorm: register `row_query` callbacks {pull}532[(#532)] + - Introduce `ELASTIC_APM_STACK_TRACE_LIMIT` config {pull}559[(#559)] + - Include agent name/version and Go version in User-Agent {pull}560[(#560)] + - Truncate `error.culprit` at 1024 chars 
{pull}561[(#561)] + +[[release-notes-1.3.0]] +==== 1.3.0 - 2019/03/20 + +https://github.com/elastic/apm-agent-go/releases/tag/v1.3.0[View release] + + - Rename "metricset.labels" to "metricset.tags" {pull}438[(#438)] + - Introduce `ELASTIC_APM_DISABLE_METRICS` to disable metrics with matching names {pull}439[(#439)] + - module/apmelasticsearch: introduce instrumentation for Elasticsearch clients {pull}445[(#445)] + - module/apmmongo: introduce instrumentation for the MongoDB Go Driver {pull}452[(#452)] + - Introduce ErrorDetailer interface {pull}453[(#453)] + - module/apmhttp: add CloseIdleConnectons and CancelRequest to RoundTripper {pull}457[(#457)] + - Allow specifying transaction (span) ID via TransactionOptions/SpanOptions {pull}463[(#463)] + - module/apmzerolog: introduce zerolog log correlation and exception-tracking writer {pull}428[(#428)] + - module/apmelasticsearch: capture body for \_msearch, template and rollup search {pull}470[(#470)] + - Ended Transactions/Spans may now be used as parents {pull}478[(#478)] + - Introduce apm.DetachedContext for async/fire-and-forget trace propagation {pull}481[(#481)] + - module/apmechov4: add a copy of apmecho supporting echo/v4 {pull}477[(#477)] + +[[release-notes-1.2.0]] +==== 1.2.0 - 2019/01/17 + +https://github.com/elastic/apm-agent-go/releases/tag/v1.2.0[View release] + + - Add "transaction.sampled" to errors {pull}410[(#410)] + - Enforce license header in source files with go-licenser {pull}411[(#411)] + - module/apmot: ignore "follows-from" span references {pull}414[(#414)] + - module/apmot: report error log records {pull}415[(#415)] + - Introduce `ELASTIC_APM_CAPTURE_HEADERS` to control HTTP header capture {pull}418[(#418)] + - module/apmzap: introduce zap log correlation and exception-tracking hook {pull}426[(#426)] + - type Error implements error interface {pull}399[(#399)] + - Add "transaction.type" to errors {pull}433[(#433)] + - Added instrumentation-specific Go modules (i.e. 
one for each package under apm/module) {pull}405[(#405)] + +[[release-notes-1.1.3]] +==== 1.1.3 - 2019/01/06 + +https://github.com/elastic/apm-agent-go/releases/tag/v1.1.3[View release] + + - Remove the `agent.*` metrics {pull}407[(#407)] + - Add support for new github.com/pkg/errors.Frame type {pull}409[(#409)] + +[[release-notes-1.1.2]] +==== 1.1.2 - 2019/01/03 + +https://github.com/elastic/apm-agent-go/releases/tag/v1.1.2[View release] + + - Fix data race between Tracer.Active and Tracer.loop {pull}406[(#406)] + +[[release-notes-1.1.1]] +==== 1.1.1 - 2018/12/13 + +https://github.com/elastic/apm-agent-go/releases/tag/v1.1.1[View release] + + - CPU% metrics are now correctly in the range [0,1] + +[[release-notes-1.1.0]] +==== 1.1.0 - 2018/12/12 + +https://github.com/elastic/apm-agent-go/releases/tag/v1.1.0[View release] + + - Stop pooling Transaction/Span/Error, introduce internal pooled objects {pull}319[(#319)] + - Enable metrics collection with default interval of 30s {pull}322[(#322)] + - `ELASTIC_APM_SERVER_CERT` enables server certificate pinning {pull}325[(#325)] + - Add Docker container ID to metadata {pull}330[(#330)] + - Added distributed trace context propagation to apmgrpc {pull}335[(#335)] + - Introduce `Span.Subtype`, `Span.Action` {pull}332[(#332)] + - apm.StartSpanOptions fixed to stop ignoring options {pull}326[(#326)] + - Add Kubernetes pod info to metadata {pull}342[(#342)] + - module/apmsql: don't report driver.ErrBadConn, context.Canceled (#346, #348) + - Added ErrorLogRecord.Error field, for associating an error value with a log record {pull}380[(#380)] + - module/apmlogrus: introduce logrus exception-tracking hook, and log correlation {pull}381[(#381)] + - module/apmbeego: introduce Beego instrumentation module {pull}386[(#386)] + - module/apmhttp: report status code for client spans {pull}388[(#388)] + +[[release-notes-1.0.0]] +==== 1.0.0 - 2018/11/14 + 
+https://github.com/elastic/apm-agent-go/releases/tag/v1.0.0[View release] + + - Implement v2 intake protocol {pull}180[(#180)] + - Unexport Transaction.Timestamp and Span.Timestamp {pull}207[(#207)] + - Add jitter (+/-10%) to backoff on transport error {pull}212[(#212)] + - Add support for span tags {pull}213[(#213)] + - Require units for size configuration {pull}223[(#223)] + - Require units for duration configuration {pull}211[(#211)] + - Add support for multiple server URLs with failover {pull}233[(#233)] + - Add support for mixing OpenTracing spans with native transactions/spans {pull}235[(#235)] + - Drop SetHTTPResponseHeadersSent and SetHTTPResponseFinished methods from Context {pull}238[(#238)] + - Stop setting custom context (gin.handler) in apmgin {pull}238[(#238)] + - Set response context in errors reported by web modules {pull}238[(#238)] + - module/apmredigo: introduce gomodule/redigo instrumentation {pull}248[(#248)] + - Update Sampler interface to take TraceContext {pull}243[(#243)] + - Truncate SQL statements to a maximum of 10000 chars, all other strings to 1024 (#244, #276) + - Add leading slash to URLs in transaction/span context {pull}250[(#250)] + - Add `Transaction.Context` method for setting framework {pull}252[(#252)] + - Timestamps are now reported as usec since epoch, spans no longer use "start" offset {pull}257[(#257)] + - `ELASTIC_APM_SANITIZE_FIELD_NAMES` and `ELASTIC_APM_IGNORE_URLS` now use wildcard matching {pull}260[(#260)] + - Changed top-level package name to "apm", and canonical import path to "go.elastic.co/apm" {pull}202[(#202)] + - module/apmrestful: introduce emicklei/go-restful instrumentation {pull}270[(#270)] + - Fix panic handling in web instrumentations {pull}273[(#273)] + - Migrate internal/fastjson to go.elastic.co/fastjson {pull}275[(#275)] + - Report all HTTP request/response headers {pull}280[(#280)] + - Drop Context.SetCustom {pull}284[(#284)] + - Reuse memory for tags {pull}286[(#286)] + - Return a more 
helpful error message when /intake/v2/events 404s, to detect old servers {pull}290[(#290)] + - Implement test service for w3c/distributed-tracing test harness {pull}293[(#293)] + - End HTTP client spans on response body closure {pull}289[(#289)] + - module/apmgrpc requires Go 1.9+ {pull}300[(#300)] + - Invalid tag key characters are replaced with underscores {pull}308[(#308)] + - `ELASTIC_APM_LOG_FILE` and `ELASTIC_APM_LOG_LEVEL` introduced {pull}313[(#313)] + +[[release-notes-0.x]] +=== Go Agent version 0.x + +[[release-notes-0.5.2]] +==== 0.5.2 - 2018/09/19 + +https://github.com/elastic/apm-agent-go/releases/tag/v0.5.2[View release] + + - Fixed premature Span.End() in apmgorm callback, causing a data-race with captured errors {pull}229[(#229)] + +[[release-notes-0.5.1]] +==== 0.5.1 - 2018/09/05 + +https://github.com/elastic/apm-agent-go/releases/tag/v0.5.1[View release] + + - Fixed a bug causing error stacktraces and culprit to sometimes not be set {pull}204[(#204)] + +[[release-notes-0.5.0]] +==== 0.5.0 - 2018/08/27 + +https://github.com/elastic/apm-agent-go/releases/tag/v0.5.0[View release] + + - `ELASTIC_APM_SERVER_URL` now defaults to "http://localhost:8200" {pull}122[(#122)] + - `Transport.SetUserAgent` method added, enabling the User-Agent to be set programatically {pull}124[(#124)] + - Inlined functions are now properly reported in stacktraces {pull}127[(#127)] + - Support for the experimental metrics API added {pull}94[(#94)] + - module/apmsql: SQL is parsed to generate more useful span names {pull}129[(#129)] + - Basic vgo module added {pull}136[(#136)] + - module/apmhttprouter: added a wrapper type for `httprouter.Router` to simplify adding routes {pull}140[(#140)] + - Add `Transaction.Context` methods for setting user IDs {pull}144[(#144)] + - module/apmgocql: new instrumentation module, providing an observer for gocql {pull}148[(#148)] + - Add `ELASTIC_APM_SERVER_TIMEOUT` config {pull}157[(#157)] + - Add 
`ELASTIC_APM_IGNORE_URLS` config {pull}158[(#158)] + - module/apmsql: fix a bug preventing errors from being captured {pull}160[(#160)] + - Introduce `Tracer.StartTransactionOptions`, drop variadic args from `Tracer.StartTransaction` {pull}165[(#165)] + - module/apmgorm: introduce GORM instrumentation module (#169, #170) + - module/apmhttp: record outgoing request URLs in span context {pull}172[(#172)] + - module/apmot: introduce OpenTracing implementation {pull}173[(#173)] + +[[release-notes-0.4.0]] +==== 0.4.0 - 2018/06/17 + +https://github.com/elastic/apm-agent-go/releases/tag/v0.4.0[View release] + +First release of the Go agent for Elastic APM diff --git a/vendor/go.elastic.co/apm/CHANGELOG.md b/vendor/go.elastic.co/apm/CHANGELOG.md new file mode 100644 index 00000000000..e118c79191a --- /dev/null +++ b/vendor/go.elastic.co/apm/CHANGELOG.md @@ -0,0 +1 @@ +Release notes are now available in our documentation at ([elastic.co](https://www.elastic.co/guide/en/apm/agent/go/current/release-notes.html)) diff --git a/vendor/go.elastic.co/apm/CODE_OF_CONDUCT.md b/vendor/go.elastic.co/apm/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..c286a3152c4 --- /dev/null +++ b/vendor/go.elastic.co/apm/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +303 See Other + +Location: https://www.elastic.co/community/codeofconduct diff --git a/vendor/go.elastic.co/apm/CONTRIBUTING.md b/vendor/go.elastic.co/apm/CONTRIBUTING.md new file mode 100644 index 00000000000..cef6651e8db --- /dev/null +++ b/vendor/go.elastic.co/apm/CONTRIBUTING.md @@ -0,0 +1,91 @@ +# Contributing to the Go APM Agent + +The Go APM Agent is open source and we love to receive contributions from our community — you! + +There are many ways to contribute, from writing tutorials or blog posts, improving the +documentation, submitting bug reports and feature requests or writing code. + +You can get in touch with us through [Discuss](https://discuss.elastic.co/c/apm). +Feedback and ideas are always welcome. 
+ +## Code contributions + +If you have a bugfix or new feature that involves significant changes that you would like to +contribute, please find or open an issue to discuss the changes first. It may be that somebody +is already working on it, or that there are particular issues that you should know about before +implementing the change. + +For minor changes (e.g. fixing a typo), you can just send your changes. + +### Submitting your changes + +Generally, we require that you test any code you are adding or modifying. Once your changes are +ready to submit for review: + +1. Sign the Contributor License Agreement + + Please make sure you have signed our [Contributor License Agreement](https://www.elastic.co/contributor-agreement/). + We are not asking you to assign copyright to us, but to give us the right to distribute + your code without restriction. We ask this of all contributors in order to assure our + users of the origin and continuing existence of the code. You only need to sign the CLA once. + +2. Test your changes + + Run the test suite to make sure that nothing is broken. + See [testing](#testing) for details. + +3. Review your changes + + Before sending your changes for review, it pays to review it yourself first! + + If you're making significant changes, please familiarize yourself with [Effective Go](https://golang.org/doc/effective_go.html) + and [go/wiki/CodeReviewComments](https://github.com/golang/go/wiki/CodeReviewComments). + These documents will walk you through writing idiomatic Go code, which we strive for. + + Here are a few things to check: + - format the code with [gofmt](https://golang.org/cmd/gofmt/) or [goimports](https://godoc.org/golang.org/x/tools/cmd/goimports) + - lint your code using [golint](https://github.com/golang/lint) + - check for common errors using [go vet](https://golang.org/cmd/vet/) + +4. 
Rebase your changes + + Update your local repository with the most recent code from the main repo, and rebase your + branch on top of the latest master branch. We prefer your initial changes to be squashed + into a single commit. Later, if we ask you to make changes, add them as separate commits. + This makes them easier to review. As a final step before merging we will either ask you to + squash all commits yourself or we'll do it for you. + +5. Submit a pull request + + Push your local changes to your forked copy of the repository and [submit a pull request](https://help.github.com/articles/using-pull-requests). + In the pull request, choose a title which sums up the changes that you have made, and in + the body provide more details about what your changes do, and the reason for making them. + Also mention the number of the issue where discussion has taken place, or issues that are + fixed/closed by the changes, e.g. "Closes #123". + +6. Be patient + + We might not be able to review your code as fast as we would like to, but we'll do our + best to dedicate it the attention it deserves. Your effort is much appreciated! + +### Testing + +The tests currently do not require any external resources, so just run `go test ./...`. +We test with all versions of Go from 1.8 onwards using [Travis CI](https://travis-ci.org). + +We track code coverage. 100% coverage is not a goal, but please do check that your tests +adequately cover the code using `go test -cover`. + +### Release procedure + +1. Update version.go and internal/apmversion/version.go, and then run "make update-modules" +1. Update [`CHANGELOG.asciidoc`](changelog.asciidoc), by adding a new version heading (`==== 1.x.x - yyyy/MM/dd`) and changing the base tag of the Unreleased comparison URL +1. For major and minor releases, update the EOL table in [`upgrading.asciidoc`](docs/upgrading.asciidoc). +1. Merge changes into github.com/elastic/apm-agent-go@master +1. 
Create tags: vN.N.N, and module/$MODULE/vN.N.N for each instrumentation module + + scripts/tagversion.sh + +1. Create release on GitHub + + hub release -d vN.N.N diff --git a/vendor/go.elastic.co/apm/Jenkinsfile b/vendor/go.elastic.co/apm/Jenkinsfile new file mode 100644 index 00000000000..07df4b18ce4 --- /dev/null +++ b/vendor/go.elastic.co/apm/Jenkinsfile @@ -0,0 +1,294 @@ +#!/usr/bin/env groovy + +@Library('apm@current') _ + +pipeline { + agent { label 'linux && immutable' } + environment { + REPO = 'apm-agent-go' + BASE_DIR = "src/go.elastic.co/apm" + NOTIFY_TO = credentials('notify-to') + JOB_GCS_BUCKET = credentials('gcs-bucket') + CODECOV_SECRET = 'secret/apm-team/ci/apm-agent-go-codecov' + GO111MODULE = 'on' + GOPATH = "${env.WORKSPACE}" + GOPROXY = 'https://proxy.golang.org' + HOME = "${env.WORKSPACE}" + GITHUB_CHECK_ITS_NAME = 'Integration Tests' + ITS_PIPELINE = 'apm-integration-tests-selector-mbp/master' + OPBEANS_REPO = 'opbeans-go' + } + options { + timeout(time: 1, unit: 'HOURS') + buildDiscarder(logRotator(numToKeepStr: '20', artifactNumToKeepStr: '20', daysToKeepStr: '30')) + timestamps() + ansiColor('xterm') + disableResume() + durabilityHint('PERFORMANCE_OPTIMIZED') + rateLimitBuilds(throttle: [count: 60, durationName: 'hour', userBoost: true]) + quietPeriod(10) + } + triggers { + issueCommentTrigger('(?i).*(?:jenkins\\W+)?run\\W+(?:the\\W+)?tests(?:\\W+please)?.*') + } + parameters { + string(name: 'GO_VERSION', defaultValue: "1.12.7", description: "Go version to use.") + booleanParam(name: 'Run_As_Master_Branch', defaultValue: false, description: 'Allow to run any steps on a PR, some steps normally only run on master branch.') + booleanParam(name: 'test_ci', defaultValue: true, description: 'Enable test') + booleanParam(name: 'docker_test_ci', defaultValue: true, description: 'Enable run docker tests') + booleanParam(name: 'bench_ci', defaultValue: true, description: 'Enable benchmarks') + } + stages { + stage('Initializing'){ + options { 
skipDefaultCheckout() } + environment { + GO_VERSION = "${params.GO_VERSION}" + PATH = "${env.PATH}:${env.WORKSPACE}/bin" + } + stages { + /** + Checkout the code and stash it, to use it on other stages. + */ + stage('Checkout') { + options { skipDefaultCheckout() } + steps { + pipelineManager([ cancelPreviousRunningBuilds: [ when: 'PR' ] ]) + deleteDir() + gitCheckout(basedir: "${BASE_DIR}", githubNotifyFirstTimeContributor: true, reference: '/var/lib/jenkins/.git-references/apm-agent-go.git') + stash allowEmpty: true, name: 'source', useDefaultExcludes: false + } + } + /** + Execute unit tests. + */ + stage('Tests') { + options { skipDefaultCheckout() } + when { + expression { return params.test_ci } + } + steps { + withGithubNotify(context: 'Tests', tab: 'tests') { + deleteDir() + unstash 'source' + dir("${BASE_DIR}"){ + script { + def go = readYaml(file: '.jenkins.yml') + def parallelTasks = [:] + go['GO_VERSION'].each{ version -> + parallelTasks["Go-${version}"] = generateStep(version) + } + // For the cutting edge + def edge = readYaml(file: '.jenkins-edge.yml') + edge['GO_VERSION'].each{ version -> + parallelTasks["Go-${version}"] = generateStepAndCatchError(version) + } + parallel(parallelTasks) + } + } + } + } + } + stage('Coverage') { + options { skipDefaultCheckout() } + when { + expression { return params.docker_test_ci } + } + steps { + withGithubNotify(context: 'Coverage') { + deleteDir() + unstash 'source' + dir("${BASE_DIR}"){ + sh script: './scripts/jenkins/before_install.sh', label: 'Install dependencies' + sh script: './scripts/jenkins/docker-test.sh', label: 'Docker tests' + } + } + } + post { + always { + coverageReport("${BASE_DIR}/build/coverage") + codecov(repo: env.REPO, basedir: "${BASE_DIR}", + flags: "-f build/coverage/coverage.cov -X search", + secret: "${CODECOV_SECRET}") + junit(allowEmptyResults: true, + keepLongStdio: true, + testResults: "${BASE_DIR}/build/junit-*.xml") + } + } + } + stage('Benchmark') { + agent { label 'linux && 
immutable' } + options { skipDefaultCheckout() } + when { + beforeAgent true + allOf { + anyOf { + branch 'master' + tag pattern: 'v\\d+\\.\\d+\\.\\d+.*', comparator: 'REGEXP' + expression { return params.Run_As_Master_Branch } + } + expression { return params.bench_ci } + } + } + steps { + withGithubNotify(context: 'Benchmark', tab: 'tests') { + deleteDir() + unstash 'source' + dir("${BASE_DIR}"){ + sh script: './scripts/jenkins/before_install.sh', label: 'Install dependencies' + sh script: './scripts/jenkins/bench.sh', label: 'Benchmarking' + sendBenchmarks(file: 'build/bench.out', index: 'benchmark-go') + } + } + } + } + } + } + stage('More OS') { + parallel { + stage('Windows') { + agent { label 'windows-2019-immutable' } + options { skipDefaultCheckout() } + environment { + GOROOT = "c:\\Go" + GOPATH = "${env.WORKSPACE}" + PATH = "${env.PATH};${env.GOROOT}\\bin;${env.GOPATH}\\bin" + GO_VERSION = "${params.GO_VERSION}" + } + steps { + withGithubNotify(context: 'Build-Test - Windows') { + cleanDir("${WORKSPACE}/${BASE_DIR}") + unstash 'source' + dir("${BASE_DIR}"){ + bat script: 'scripts/jenkins/windows/install-tools.bat', label: 'Install tools' + bat script: 'scripts/jenkins/windows/build-test.bat', label: 'Build and test' + } + } + } + post { + always { + junit(allowEmptyResults: true, keepLongStdio: true, testResults: "${BASE_DIR}/build/junit-*.xml") + } + } + } + stage('OSX') { + agent { label 'macosx' } + options { skipDefaultCheckout() } + environment { + GO_VERSION = "${params.GO_VERSION}" + PATH = "${env.PATH}:${env.WORKSPACE}/bin" + } + steps { + withGithubNotify(context: 'Build-Test - OSX') { + deleteDir() + unstash 'source' + dir("${BASE_DIR}"){ + sh script: './scripts/jenkins/before_install.sh', label: 'Install dependencies' + sh script: './scripts/jenkins/build-test.sh', label: 'Build and test' + } + } + } + post { + always { + junit(allowEmptyResults: true, keepLongStdio: true, testResults: "${BASE_DIR}/build/junit-*.xml") + deleteDir() + } + } + } 
+ } + } + stage('Integration Tests') { + agent none + when { + beforeAgent true + allOf { + anyOf { + environment name: 'GIT_BUILD_CAUSE', value: 'pr' + expression { return !params.Run_As_Master_Branch } + } + } + } + steps { + build(job: env.ITS_PIPELINE, propagate: false, wait: false, + parameters: [string(name: 'INTEGRATION_TEST', value: 'Go'), + string(name: 'BUILD_OPTS', value: "--go-agent-version ${env.GIT_BASE_COMMIT} --opbeans-go-agent-branch ${env.GIT_BASE_COMMIT}"), + string(name: 'GITHUB_CHECK_NAME', value: env.GITHUB_CHECK_ITS_NAME), + string(name: 'GITHUB_CHECK_REPO', value: env.REPO), + string(name: 'GITHUB_CHECK_SHA1', value: env.GIT_BASE_COMMIT)]) + githubNotify(context: "${env.GITHUB_CHECK_ITS_NAME}", description: "${env.GITHUB_CHECK_ITS_NAME} ...", status: 'PENDING', targetUrl: "${env.JENKINS_URL}search/?q=${env.ITS_PIPELINE.replaceAll('/','+')}") + } + } + stage('Release') { + options { skipDefaultCheckout() } + when { + beforeAgent true + tag pattern: 'v\\d+\\.\\d+\\.\\d+', comparator: 'REGEXP' + } + stages { + stage('Opbeans') { + environment { + REPO_NAME = "${OPBEANS_REPO}" + GO_VERSION = "${params.GO_VERSION}" + } + steps { + deleteDir() + dir("${OPBEANS_REPO}"){ + git credentialsId: 'f6c7695a-671e-4f4f-a331-acdce44ff9ba', + url: "git@github.com:elastic/${OPBEANS_REPO}.git" + sh script: ".ci/bump-version.sh ${env.BRANCH_NAME}", label: 'Bump version' + // The opbeans-go pipeline will trigger a release for the master branch + gitPush() + // The opbeans-go pipeline will trigger a release for the release tag + gitCreateTag(tag: "${env.BRANCH_NAME}") + } + } + } + } + } + } + post { + cleanup { + notifyBuildResult() + } + } +} + +def generateStep(version){ + return { + node('linux && immutable'){ + try { + deleteDir() + unstash 'source' + echo "${version}" + dir("${BASE_DIR}"){ + withEnv(["GO_VERSION=${version}"]) { + // Another retry in case there are any environmental issues + // See https://issuetracker.google.com/issues/146072599 for 
more context + retry(2) { + sleep randomNumber(min: 2, max: 5) + sh script: './scripts/jenkins/before_install.sh', label: 'Install dependencies' + } + sh script: './scripts/jenkins/build-test.sh', label: 'Build and test' + } + } + } catch(e){ + error(e.toString()) + } finally { + junit(allowEmptyResults: true, + keepLongStdio: true, + testResults: "${BASE_DIR}/build/junit-*.xml") + } + } + } +} + +def generateStepAndCatchError(version){ + return { + catchError(buildResult: 'SUCCESS', message: 'Cutting Edge Tests', stageResult: 'UNSTABLE') { + generateStep(version) + } + } +} + +def cleanDir(path){ + powershell label: "Clean ${path}", script: "Remove-Item -Recurse -Force ${path}" +} diff --git a/vendor/go.elastic.co/apm/LICENSE b/vendor/go.elastic.co/apm/LICENSE new file mode 100644 index 00000000000..b1a731fb5a3 --- /dev/null +++ b/vendor/go.elastic.co/apm/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2018 Elasticsearch BV + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.elastic.co/apm/Makefile b/vendor/go.elastic.co/apm/Makefile new file mode 100644 index 00000000000..572cd2647fc --- /dev/null +++ b/vendor/go.elastic.co/apm/Makefile @@ -0,0 +1,81 @@ +TEST_TIMEOUT?=5m +GO_LICENSER_EXCLUDE=stacktrace/testdata + +.PHONY: check +check: precheck check-modules test + +.PHONY: precheck +precheck: check-goimports check-lint check-vet check-dockerfile-testing check-licenses + +.PHONY: check-goimports +.PHONY: check-dockerfile-testing +.PHONY: check-lint +.PHONY: check-licenses +.PHONY: check-modules +ifeq ($(shell go run ./scripts/mingoversion.go -print 1.12),true) +check-goimports: + sh scripts/check_goimports.sh + +check-dockerfile-testing: + go run ./scripts/gendockerfile.go -d + +check-lint: + sh scripts/check_lint.sh + +check-licenses: + go-licenser -d $(patsubst %,-exclude %,$(GO_LICENSER_EXCLUDE)) . + +check-modules: + go run scripts/genmod/main.go -check . +else +check-goimports: +check-dockerfile-testing: +check-lint: +check-licenses: +check-modules: +endif + +.PHONY: check-vet +check-vet: + @for dir in $(shell scripts/moduledirs.sh); do (cd $$dir && go vet ./...) || exit $$?; done + +.PHONY: install +install: + go get -v -t ./... 
+ +.PHONY: docker-test +docker-test: + scripts/docker-compose-testing run -T --rm go-agent-tests make test + +.PHONY: test +test: + @for dir in $(shell scripts/moduledirs.sh); do (cd $$dir && go test -v -timeout=$(TEST_TIMEOUT) ./...) || exit $$?; done + +.PHONY: coverage +coverage: + @bash scripts/test_coverage.sh + +.PHONY: fmt +fmt: + @GOIMPORTSFLAGS=-w sh scripts/goimports.sh + +.PHONY: clean +clean: + rm -fr docs/html + +.PHONY: update-modules +update-modules: + go run scripts/genmod/main.go . + +.PHONY: docs +docs: +ifdef ELASTIC_DOCS + $(ELASTIC_DOCS)/build_docs --direct_html --chunk=1 $(BUILD_DOCS_ARGS) --doc docs/index.asciidoc --out docs/html +else + @echo "\nELASTIC_DOCS is not defined.\n" + @exit 1 +endif + +.PHONY: update-licenses +update-licenses: + go-licenser $(patsubst %, -exclude %, $(GO_LICENSER_EXCLUDE)) . diff --git a/vendor/go.elastic.co/apm/NOTICE b/vendor/go.elastic.co/apm/NOTICE new file mode 100644 index 00000000000..147618ca4f9 --- /dev/null +++ b/vendor/go.elastic.co/apm/NOTICE @@ -0,0 +1,84 @@ +Elastic APM Go Agent +Copyright 2018-2019 Elasticsearch B.V. + +This product includes software developed at Elasticsearch, B.V. (https://www.elastic.co/). + +========================================= +Third party code included by the Go Agent +========================================= + +------------------------------------------------------------------------------------ +This project copies code from the Go standard library (https://github.com/golang/go) +------------------------------------------------------------------------------------ + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------- +This project copies code from Gorilla Mux (https://github.com/gorilla/mux) +-------------------------------------------------------------------------- + +Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------ +This project copies code from pq (https://github.com/lib/pq) +------------------------------------------------------------ + +Copyright (c) 2011-2013, 'pq' Contributors Portions Copyright (C) 2011 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/go.elastic.co/apm/README.md b/vendor/go.elastic.co/apm/README.md new file mode 100644 index 00000000000..f173f83d4df --- /dev/null +++ b/vendor/go.elastic.co/apm/README.md @@ -0,0 +1,41 @@ +[![Build Status](https://apm-ci.elastic.co/buildStatus/icon?job=apm-agent-go/apm-agent-go-mbp/master)](https://apm-ci.elastic.co/job/apm-agent-go/job/apm-agent-go-mbp/job/master/) +[![GoDoc](https://godoc.org/go.elastic.co/apm?status.svg)](http://godoc.org/go.elastic.co/apm) +[![Travis-CI](https://travis-ci.org/elastic/apm-agent-go.svg)](https://travis-ci.org/elastic/apm-agent-go) +[![AppVeyor](https://ci.appveyor.com/api/projects/status/28fhswvqqc7p90f7?svg=true)](https://ci.appveyor.com/project/AndrewWilkins/apm-agent-go) +[![Go Report Card](https://goreportcard.com/badge/go.elastic.co/apm)](https://goreportcard.com/report/go.elastic.co/apm) +[![codecov.io](https://codecov.io/github/elastic/apm-agent-go/coverage.svg?branch=master)](https://codecov.io/github/elastic/apm-agent-go?branch=master) + +# apm-agent-go: APM Agent for Go + +This is the official Go package for [Elastic APM](https://www.elastic.co/solutions/apm). + +The Go agent enables you to trace the execution of operations in your application, +sending performance metrics and errors to the Elastic APM server. You can find a +list of the supported frameworks and other technologies in the [documentation](https://www.elastic.co/guide/en/apm/agent/go/current/supported-tech.html). + +We'd love to hear your feedback, please take a minute to fill out our [survey](https://docs.google.com/forms/d/e/1FAIpQLScbW7D8m-otPO7cxqeg7XstWR8vMnxG6brnXLs_TFVSTHuHvg/viewform?usp=sf_link). 
+ +## Installation + +```bash +go get -u go.elastic.co/apm +``` + +## Requirements + +Tested with Go 1.8+ on Linux, Windows and MacOS. + +Requires [APM Server](https://github.com/elastic/apm-server) v6.5 or newer. + +## License + +Apache 2.0. + +## Documentation + +[Elastic APM Go documentation](https://www.elastic.co/guide/en/apm/agent/go/current/index.html). + +## Getting Help + +If you find a bug, please [report an issue](https://github.com/elastic/apm-agent-go/issues). +For any other assistance, please open or add to a topic on the [APM discuss forum](https://discuss.elastic.co/c/apm). diff --git a/vendor/go.elastic.co/apm/apmconfig/doc.go b/vendor/go.elastic.co/apm/apmconfig/doc.go new file mode 100644 index 00000000000..dd29e1525a6 --- /dev/null +++ b/vendor/go.elastic.co/apm/apmconfig/doc.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package apmconfig provides an API for watching agent config +// changes. 
+package apmconfig diff --git a/vendor/go.elastic.co/apm/apmconfig/watcher.go b/vendor/go.elastic.co/apm/apmconfig/watcher.go new file mode 100644 index 00000000000..57ea3c41240 --- /dev/null +++ b/vendor/go.elastic.co/apm/apmconfig/watcher.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmconfig + +import ( + "context" +) + +// Watcher provides an interface for watching config changes. +type Watcher interface { + // WatchConfig subscribes to changes to configuration for the agent, + // which must match the given ConfigSelector. + // + // If the watcher experiences an unexpected error fetching config, + // it will surface this in a Change with the Err field set. + // + // If the provided context is cancelled, or the watcher experiences + // a fatal condition, the returned channel will be closed. + WatchConfig(context.Context, WatchParams) <-chan Change +} + +// WatchParams holds parameters for watching for config changes. +type WatchParams struct { + // Service holds the name and optionally environment name used + // for filtering the config to watch. 
+ Service struct { + Name string + Environment string + } +} + +// Change holds an agent configuration change: an error or the new config attributes. +type Change struct { + // Err holds an error that occurred while querying agent config. + Err error + + // Attrs holds the agent's configuration. May be empty. + Attrs map[string]string +} diff --git a/vendor/go.elastic.co/apm/apmtest/configwatcher.go b/vendor/go.elastic.co/apm/apmtest/configwatcher.go new file mode 100644 index 00000000000..723466a79e8 --- /dev/null +++ b/vendor/go.elastic.co/apm/apmtest/configwatcher.go @@ -0,0 +1,32 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmtest + +import ( + "context" + + "go.elastic.co/apm/apmconfig" +) + +// WatchConfigFunc is a function type that implements apmconfig.Watcher. +type WatchConfigFunc func(context.Context, apmconfig.WatchParams) <-chan apmconfig.Change + +// WatchConfig returns f(ctx, params). 
+func (f WatchConfigFunc) WatchConfig(ctx context.Context, params apmconfig.WatchParams) <-chan apmconfig.Change { + return f(ctx, params) +} diff --git a/vendor/go.elastic.co/apm/apmtest/discard.go b/vendor/go.elastic.co/apm/apmtest/discard.go new file mode 100644 index 00000000000..186b21af83e --- /dev/null +++ b/vendor/go.elastic.co/apm/apmtest/discard.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmtest + +import ( + "log" + + "go.elastic.co/apm" + "go.elastic.co/apm/transport/transporttest" +) + +// DiscardTracer is an apm.Tracer that discards all events. +// +// This tracer may be used by multiple tests, and so should +// not be modified or closed. +// +// Importing apmtest will close apm.DefaultTracer, and update +// it to this value. +var DiscardTracer *apm.Tracer + +// NewDiscardTracer returns a new apm.Tracer that discards all events. 
+func NewDiscardTracer() *apm.Tracer { + tracer, err := apm.NewTracerOptions(apm.TracerOptions{ + Transport: transporttest.Discard, + }) + if err != nil { + log.Fatal(err) + } + return tracer +} + +func init() { + apm.DefaultTracer.Close() + tracer := NewDiscardTracer() + DiscardTracer = tracer + apm.DefaultTracer = DiscardTracer +} diff --git a/vendor/go.elastic.co/apm/apmtest/httpsuite.go b/vendor/go.elastic.co/apm/apmtest/httpsuite.go new file mode 100644 index 00000000000..cf075b24abe --- /dev/null +++ b/vendor/go.elastic.co/apm/apmtest/httpsuite.go @@ -0,0 +1,137 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmtest + +import ( + "net/http" + "net/http/httptest" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "go.elastic.co/apm" + "go.elastic.co/apm/transport/transporttest" +) + +// HTTPTestSuite is a test suite for HTTP instrumentation modules. +type HTTPTestSuite struct { + suite.Suite + + // Handler holds an instrumented HTTP handler. 
Handler must + // support the following routes: + // + // GET /implicit_write (no explicit write on the response) + // GET /panic_before_write (panic without writing response) + // GET /panic_after_write (panic after writing response) + // + Handler http.Handler + + // Tracer is the apm.Tracer used to instrument Handler. + // + // HTTPTestSuite will close the tracer when all tests have + // been completed. + Tracer *apm.Tracer + + // Recorder is the transport used as the transport for Tracer. + Recorder *transporttest.RecorderTransport + + server *httptest.Server +} + +// SetupTest runs before each test. +func (s *HTTPTestSuite) SetupTest() { + s.Recorder.ResetPayloads() +} + +// SetupSuite runs before the tests in the suite are run. +func (s *HTTPTestSuite) SetupSuite() { + s.server = httptest.NewServer(s.Handler) +} + +// TearDownSuite runs after the tests in the suite are run. +func (s *HTTPTestSuite) TearDownSuite() { + if s.server != nil { + s.server.Close() + } + s.Tracer.Close() +} + +// TestImplicitWrite tests the behaviour of instrumented handlers +// for routes which do not explicitly write a response, but instead +// leave it to the framework to write an empty 200 response. +func (s *HTTPTestSuite) TestImplicitWrite() { + resp, err := http.Get(s.server.URL + "/implicit_write") + require.NoError(s.T(), err) + resp.Body.Close() + s.Equal(http.StatusOK, resp.StatusCode) + + s.Tracer.Flush(nil) + ps := s.Recorder.Payloads() + require.Len(s.T(), ps.Transactions, 1) + + tx := ps.Transactions[0] + s.Equal("HTTP 2xx", tx.Result) + s.Equal(resp.StatusCode, tx.Context.Response.StatusCode) +} + +// TestPanicBeforeWrite tests the behaviour of instrumented handlers +// for routes which panic before any headers are written. The handler +// is expected to recover the panic and write an empty 500 response. 
+func (s *HTTPTestSuite) TestPanicBeforeWrite() { + resp, err := http.Get(s.server.URL + "/panic_before_write") + require.NoError(s.T(), err) + resp.Body.Close() + s.Equal(http.StatusInternalServerError, resp.StatusCode) + + s.Tracer.Flush(nil) + ps := s.Recorder.Payloads() + require.Len(s.T(), ps.Transactions, 1) + require.Len(s.T(), ps.Errors, 1) + + tx := ps.Transactions[0] + s.Equal("HTTP 5xx", tx.Result) + s.Equal(resp.StatusCode, tx.Context.Response.StatusCode) + + e := ps.Errors[0] + s.Equal(tx.ID, e.ParentID) + s.Equal(resp.StatusCode, e.Context.Response.StatusCode) +} + +// TestPanicAfterWrite tests the behaviour of instrumented handlers +// for routes which panic after writing headers. The handler is +// expected to recover the panic without otherwise affecting the +// response. +func (s *HTTPTestSuite) TestPanicAfterWrite() { + resp, err := http.Get(s.server.URL + "/panic_after_write") + require.NoError(s.T(), err) + resp.Body.Close() + s.Equal(http.StatusOK, resp.StatusCode) + + s.Tracer.Flush(nil) + ps := s.Recorder.Payloads() + require.Len(s.T(), ps.Transactions, 1) + require.Len(s.T(), ps.Errors, 1) + + tx := ps.Transactions[0] + s.Equal("HTTP 2xx", tx.Result) + s.Equal(resp.StatusCode, tx.Context.Response.StatusCode) + + e := ps.Errors[0] + s.Equal(tx.ID, e.ParentID) + s.Equal(resp.StatusCode, e.Context.Response.StatusCode) +} diff --git a/vendor/go.elastic.co/apm/apmtest/recorder.go b/vendor/go.elastic.co/apm/apmtest/recorder.go new file mode 100644 index 00000000000..8f2e65519ee --- /dev/null +++ b/vendor/go.elastic.co/apm/apmtest/recorder.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmtest + +import ( + "context" + "fmt" + + "go.elastic.co/apm" + "go.elastic.co/apm/model" + "go.elastic.co/apm/transport/transporttest" +) + +// NewRecordingTracer returns a new RecordingTracer, containing a new +// Tracer using the RecorderTransport stored inside. +func NewRecordingTracer() *RecordingTracer { + var result RecordingTracer + tracer, err := apm.NewTracerOptions(apm.TracerOptions{ + Transport: &result.RecorderTransport, + }) + if err != nil { + panic(err) + } + result.Tracer = tracer + return &result +} + +// RecordingTracer holds an apm.Tracer and transporttest.RecorderTransport. +type RecordingTracer struct { + *apm.Tracer + transporttest.RecorderTransport +} + +// WithTransaction calls rt.WithTransactionOptions with a zero apm.TransactionOptions. +func (rt *RecordingTracer) WithTransaction(f func(ctx context.Context)) (model.Transaction, []model.Span, []model.Error) { + return rt.WithTransactionOptions(apm.TransactionOptions{}, f) +} + +// WithTransactionOptions starts a transaction with the given options, +// calls f with the transaction in the provided context, ends the transaction +// and flushes the tracer, and then returns the resulting events. 
+func (rt *RecordingTracer) WithTransactionOptions(opts apm.TransactionOptions, f func(ctx context.Context)) (model.Transaction, []model.Span, []model.Error) { + tx := rt.StartTransactionOptions("name", "type", opts) + ctx := apm.ContextWithTransaction(context.Background(), tx) + f(ctx) + + tx.End() + rt.Flush(nil) + payloads := rt.Payloads() + if n := len(payloads.Transactions); n != 1 { + panic(fmt.Errorf("expected 1 transaction, got %d", n)) + } + return payloads.Transactions[0], payloads.Spans, payloads.Errors +} diff --git a/vendor/go.elastic.co/apm/apmtest/recordlogger.go b/vendor/go.elastic.co/apm/apmtest/recordlogger.go new file mode 100644 index 00000000000..9196c36851b --- /dev/null +++ b/vendor/go.elastic.co/apm/apmtest/recordlogger.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmtest + +import "fmt" + +// RecordLogger is an implementation of apm.Logger, recording log entries. +type RecordLogger struct { + Records []LogRecord +} + +// Debugf logs debug messages. +func (l *RecordLogger) Debugf(format string, args ...interface{}) { + l.logf("debug", format, args...) +} + +// Errorf logs error messages. 
+func (l *RecordLogger) Errorf(format string, args ...interface{}) { + l.logf("error", format, args...) +} + +// Warningf logs error messages. +func (l *RecordLogger) Warningf(format string, args ...interface{}) { + l.logf("warning", format, args...) +} + +func (l *RecordLogger) logf(level string, format string, args ...interface{}) { + l.Records = append(l.Records, LogRecord{ + Level: level, + Format: format, + Message: fmt.Sprintf(format, args...), + }) +} + +// LogRecord holds the details of a log record. +type LogRecord struct { + // Level is the log level: "debug", "error", or "warning". + Level string + + // Format is the log message format, like "Thingy did foo %d times". + Format string + + // Message is the formatted message. + Message string +} diff --git a/vendor/go.elastic.co/apm/apmtest/testlogger.go b/vendor/go.elastic.co/apm/apmtest/testlogger.go new file mode 100644 index 00000000000..1bbbdf92a71 --- /dev/null +++ b/vendor/go.elastic.co/apm/apmtest/testlogger.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmtest + +// TestLogger is an implementation of apm.Logger, +// logging to a testing.T. 
+type TestLogger struct { + l LogfLogger +} + +// NewTestLogger returns a new TestLogger that logs messages to l. +func NewTestLogger(l LogfLogger) TestLogger { + return TestLogger{l: l} +} + +// Debugf logs debug messages. +func (t TestLogger) Debugf(format string, args ...interface{}) { + t.l.Logf("[DEBUG] "+format, args...) +} + +// Errorf logs error messages. +func (t TestLogger) Errorf(format string, args ...interface{}) { + t.l.Logf("[ERROR] "+format, args...) +} + +// LogfLogger is an interface with the a Logf method, +// implemented by *testing.T and *testing.B. +type LogfLogger interface { + Logf(string, ...interface{}) +} diff --git a/vendor/go.elastic.co/apm/apmtest/withtransaction.go b/vendor/go.elastic.co/apm/apmtest/withtransaction.go new file mode 100644 index 00000000000..3c19998a498 --- /dev/null +++ b/vendor/go.elastic.co/apm/apmtest/withtransaction.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmtest + +import ( + "context" + + "go.elastic.co/apm" + "go.elastic.co/apm/model" +) + +// WithTransaction is equivalent to calling WithTransactionOptions with a zero TransactionOptions. 
+func WithTransaction(f func(ctx context.Context)) (model.Transaction, []model.Span, []model.Error) { + return WithTransactionOptions(apm.TransactionOptions{}, f) +} + +// WithTransactionOptions calls f with a new context containing a transaction +// and transaction options, flushes the transaction to a test server, and returns +// the decoded transaction and any associated spans and errors. +func WithTransactionOptions(opts apm.TransactionOptions, f func(ctx context.Context)) (model.Transaction, []model.Span, []model.Error) { + tracer := NewRecordingTracer() + defer tracer.Close() + return tracer.WithTransactionOptions(opts, f) +} diff --git a/vendor/go.elastic.co/apm/breakdown.go b/vendor/go.elastic.co/apm/breakdown.go new file mode 100644 index 00000000000..df6cf519014 --- /dev/null +++ b/vendor/go.elastic.co/apm/breakdown.go @@ -0,0 +1,365 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + "unsafe" + + "go.elastic.co/apm/model" +) + +const ( + // breakdownMetricsLimit is the maximum number of breakdown metric + // buckets to accumulate per reporting period. 
Metrics are broken + // down by {transactionType, transactionName, spanType, spanSubtype} + // tuples. + breakdownMetricsLimit = 1000 + + // appSpanType is the special span type associated with transactions, + // for reporting transaction self-time. + appSpanType = "app" + + // Breakdown metric names. + transactionDurationCountMetricName = "transaction.duration.count" + transactionDurationSumMetricName = "transaction.duration.sum.us" + transactionBreakdownCountMetricName = "transaction.breakdown.count" + spanSelfTimeCountMetricName = "span.self_time.count" + spanSelfTimeSumMetricName = "span.self_time.sum.us" +) + +type pad32 struct { + // Zero-sized on 64-bit architectures, 4 bytes on 32-bit. + _ [(unsafe.Alignof(uint64(0)) % 8) / 4]uintptr +} + +var ( + breakdownMetricsLimitWarning = fmt.Sprintf(` +The limit of %d breakdown metricsets has been reached, no new metricsets will be created. +Try to name your transactions so that there are less distinct transaction names.`[1:], + breakdownMetricsLimit, + ) +) + +// spanTimingsKey identifies a span type and subtype, for use as the key in +// spanTimingsMap. +type spanTimingsKey struct { + spanType string + spanSubtype string +} + +// spanTiming records the number of times a {spanType, spanSubtype} pair +// has occurred (within the context of a transaction group), along with +// the sum of the span durations. +type spanTiming struct { + duration int64 + count uintptr +} + +// spanTimingsMap records span timings for a transaction group. +type spanTimingsMap map[spanTimingsKey]spanTiming + +// add accumulates the timing for a {spanType, spanSubtype} pair. +func (m spanTimingsMap) add(spanType, spanSubtype string, d time.Duration) { + k := spanTimingsKey{spanType: spanType, spanSubtype: spanSubtype} + timing := m[k] + timing.count++ + timing.duration += int64(d) + m[k] = timing +} + +// reset resets m back to its initial zero state. 
+func (m spanTimingsMap) reset() { + for k := range m { + delete(m, k) + } +} + +// breakdownMetrics holds a pair of breakdown metrics maps. The "active" map +// accumulates new breakdown metrics, and is swapped with the "inactive" map +// just prior to when metrics gathering begins. When metrics gathering +// completes, the inactive map will be empty. +// +// breakdownMetrics may be written to concurrently by the tracer, and any +// number of other goroutines when a transaction cannot be enqueued. +type breakdownMetrics struct { + enabled bool + + mu sync.RWMutex + active, inactive *breakdownMetricsMap +} + +func newBreakdownMetrics() *breakdownMetrics { + return &breakdownMetrics{ + active: newBreakdownMetricsMap(), + inactive: newBreakdownMetricsMap(), + } +} + +type breakdownMetricsMap struct { + mu sync.RWMutex + entries int + m map[uint64][]*breakdownMetricsMapEntry + space []breakdownMetricsMapEntry +} + +func newBreakdownMetricsMap() *breakdownMetricsMap { + return &breakdownMetricsMap{ + m: make(map[uint64][]*breakdownMetricsMapEntry), + space: make([]breakdownMetricsMapEntry, breakdownMetricsLimit), + } +} + +type breakdownMetricsMapEntry struct { + breakdownTiming + breakdownMetricsKey +} + +// breakdownMetricsKey identifies a transaction group, and optionally a +// spanTimingsKey, for recording transaction and span breakdown metrics. +type breakdownMetricsKey struct { + transactionType string + transactionName string + spanTimingsKey +} + +func (k breakdownMetricsKey) hash() uint64 { + h := newFnv1a() + h.add(k.transactionType) + h.add(k.transactionName) + if k.spanType != "" { + h.add(k.spanType) + } + if k.spanSubtype != "" { + h.add(k.spanSubtype) + } + return uint64(h) +} + +// breakdownTiming holds breakdown metrics. +type breakdownTiming struct { + // transaction holds the "transaction.duration" metric values. + transaction spanTiming + + // Padding to ensure the span field below is 64-bit aligned. 
+ _ pad32 + + // span holds the "span.self_time" metric values. + span spanTiming + + // breakdownCount records the number of transactions for which we + // have calculated breakdown metrics. If breakdown metrics are + // enabled, this will be equal transaction.count. + breakdownCount uintptr +} + +func (lhs *breakdownTiming) accumulate(rhs breakdownTiming) { + atomic.AddUintptr(&lhs.transaction.count, rhs.transaction.count) + atomic.AddInt64(&lhs.transaction.duration, rhs.transaction.duration) + atomic.AddUintptr(&lhs.span.count, rhs.span.count) + atomic.AddInt64(&lhs.span.duration, rhs.span.duration) + atomic.AddUintptr(&lhs.breakdownCount, rhs.breakdownCount) +} + +// recordTransaction records breakdown metrics for td into m. +// +// recordTransaction returns true if breakdown metrics were +// completely recorded, and false if any metrics were not +// recorded due to the limit being reached. +func (m *breakdownMetrics) recordTransaction(td *TransactionData) bool { + m.mu.RLock() + defer m.mu.RUnlock() + + k := breakdownMetricsKey{ + transactionType: td.Type, + transactionName: td.Name, + } + k.spanType = appSpanType + + var breakdownCount int + var transactionSpanTiming spanTiming + var transactionDuration = spanTiming{count: 1, duration: int64(td.Duration)} + if td.breakdownMetricsEnabled { + breakdownCount = 1 + endTime := td.timestamp.Add(td.Duration) + transactionSelfTime := td.Duration - td.childrenTimer.finalDuration(endTime) + transactionSpanTiming = spanTiming{count: 1, duration: int64(transactionSelfTime)} + } + + if !m.active.record(k, breakdownTiming{ + transaction: transactionDuration, + breakdownCount: uintptr(breakdownCount), + span: transactionSpanTiming, + }) { + // We couldn't record the transaction's metricset, so we won't + // be able to record spans for that transaction either. 
+ return false + } + + ok := true + for sk, timing := range td.spanTimings { + k.spanTimingsKey = sk + ok = ok && m.active.record(k, breakdownTiming{span: timing}) + } + return ok +} + +// record records a single breakdown metric, identified by k. +func (m *breakdownMetricsMap) record(k breakdownMetricsKey, bt breakdownTiming) bool { + hash := k.hash() + m.mu.RLock() + entries, ok := m.m[hash] + m.mu.RUnlock() + var offset int + if ok { + for offset = range entries { + if entries[offset].breakdownMetricsKey == k { + // The append may reallocate the entries, but the + // entries are pointers into m.activeSpace. Therefore, + // entries' timings can safely be atomically incremented + // without holding the read lock. + entries[offset].breakdownTiming.accumulate(bt) + return true + } + } + offset++ // where to start searching with the write lock below + } + + m.mu.Lock() + entries, ok = m.m[hash] + if ok { + for i := range entries[offset:] { + if entries[offset+i].breakdownMetricsKey == k { + m.mu.Unlock() + entries[offset+i].breakdownTiming.accumulate(bt) + return true + } + } + } else if m.entries >= breakdownMetricsLimit { + m.mu.Unlock() + return false + } + entry := &m.space[m.entries] + *entry = breakdownMetricsMapEntry{ + breakdownTiming: bt, + breakdownMetricsKey: k, + } + m.m[hash] = append(entries, entry) + m.entries++ + m.mu.Unlock() + return true +} + +// gather is called by builtinMetricsGatherer to gather breakdown metrics. +func (m *breakdownMetrics) gather(out *Metrics) { + // Hold m.mu only long enough to swap m.active and m.inactive. + // This will be blocked by metric updates, but that's OK; only + // metrics gathering will be delayed. After swapping we do not + // need to hold m.mu, since nothing concurrently accesses + // m.inactive while the gatherer is iterating over it. 
+ m.mu.Lock() + m.active, m.inactive = m.inactive, m.active + m.mu.Unlock() + + for hash, entries := range m.inactive.m { + for _, entry := range entries { + if entry.transaction.count > 0 { + out.transactionGroupMetrics = append(out.transactionGroupMetrics, &model.Metrics{ + Transaction: model.MetricsTransaction{ + Type: entry.transactionType, + Name: entry.transactionName, + }, + Samples: map[string]model.Metric{ + transactionDurationCountMetricName: { + Value: float64(entry.transaction.count), + }, + transactionDurationSumMetricName: { + Value: durationMicros(time.Duration(entry.transaction.duration)), + }, + transactionBreakdownCountMetricName: { + Value: float64(entry.breakdownCount), + }, + }, + }) + } + if entry.span.count > 0 { + out.transactionGroupMetrics = append(out.transactionGroupMetrics, &model.Metrics{ + Transaction: model.MetricsTransaction{ + Type: entry.transactionType, + Name: entry.transactionName, + }, + Span: model.MetricsSpan{ + Type: entry.spanType, + Subtype: entry.spanSubtype, + }, + Samples: map[string]model.Metric{ + spanSelfTimeCountMetricName: { + Value: float64(entry.span.count), + }, + spanSelfTimeSumMetricName: { + Value: durationMicros(time.Duration(entry.span.duration)), + }, + }, + }) + } + entry.breakdownMetricsKey = breakdownMetricsKey{} // release strings + } + delete(m.inactive.m, hash) + } + m.inactive.entries = 0 +} + +// childrenTimer tracks time spent by children of a transaction or span. +// +// childrenTimer is not goroutine-safe. +type childrenTimer struct { + // active holds the number active children. + active int + + // start holds the timestamp at which active went from zero to one. + start time.Time + + // totalDuration holds the total duration of time periods in which + // at least one child was active. 
+ totalDuration time.Duration +} + +func (t *childrenTimer) childStarted(start time.Time) { + t.active++ + if t.active == 1 { + t.start = start + } +} + +func (t *childrenTimer) childEnded(end time.Time) { + t.active-- + if t.active == 0 { + t.totalDuration += end.Sub(t.start) + } +} + +func (t *childrenTimer) finalDuration(end time.Time) time.Duration { + if t.active > 0 { + t.active = 0 + t.totalDuration += end.Sub(t.start) + } + return t.totalDuration +} diff --git a/vendor/go.elastic.co/apm/builtin_metrics.go b/vendor/go.elastic.co/apm/builtin_metrics.go new file mode 100644 index 00000000000..546384efc8b --- /dev/null +++ b/vendor/go.elastic.co/apm/builtin_metrics.go @@ -0,0 +1,164 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +import ( + "context" + "runtime" + + sysinfo "github.com/elastic/go-sysinfo" + "github.com/elastic/go-sysinfo/types" +) + +// builtinMetricsGatherer is an MetricsGatherer which gathers builtin metrics: +// - goroutines +// - memstats (allocations, usage, GC, etc.) 
+// - system and process CPU and memory usage +type builtinMetricsGatherer struct { + tracer *Tracer + lastSysMetrics sysMetrics +} + +func newBuiltinMetricsGatherer(t *Tracer) *builtinMetricsGatherer { + g := &builtinMetricsGatherer{tracer: t} + if metrics, err := gatherSysMetrics(); err == nil { + g.lastSysMetrics = metrics + } + return g +} + +// GatherMetrics gathers mem metrics into m. +func (g *builtinMetricsGatherer) GatherMetrics(ctx context.Context, m *Metrics) error { + m.Add("golang.goroutines", nil, float64(runtime.NumGoroutine())) + g.gatherSystemMetrics(m) + g.gatherMemStatsMetrics(m) + g.tracer.breakdownMetrics.gather(m) + return nil +} + +func (g *builtinMetricsGatherer) gatherSystemMetrics(m *Metrics) { + metrics, err := gatherSysMetrics() + if err != nil { + return + } + systemCPU, processCPU := calculateCPUUsage(metrics.cpu, g.lastSysMetrics.cpu) + m.Add("system.cpu.total.norm.pct", nil, systemCPU) + m.Add("system.process.cpu.total.norm.pct", nil, processCPU) + m.Add("system.memory.total", nil, float64(metrics.mem.system.Total)) + m.Add("system.memory.actual.free", nil, float64(metrics.mem.system.Available)) + m.Add("system.process.memory.size", nil, float64(metrics.mem.process.Virtual)) + m.Add("system.process.memory.rss.bytes", nil, float64(metrics.mem.process.Resident)) + g.lastSysMetrics = metrics +} + +func (g *builtinMetricsGatherer) gatherMemStatsMetrics(m *Metrics) { + var mem runtime.MemStats + runtime.ReadMemStats(&mem) + + addUint64 := func(name string, v uint64) { + m.Add(name, nil, float64(v)) + } + add := func(name string, v float64) { + m.Add(name, nil, v) + } + + addUint64("golang.heap.allocations.mallocs", mem.Mallocs) + addUint64("golang.heap.allocations.frees", mem.Frees) + addUint64("golang.heap.allocations.objects", mem.HeapObjects) + addUint64("golang.heap.allocations.total", mem.TotalAlloc) + addUint64("golang.heap.allocations.allocated", mem.HeapAlloc) + addUint64("golang.heap.allocations.idle", mem.HeapIdle) + 
addUint64("golang.heap.allocations.active", mem.HeapInuse) + addUint64("golang.heap.system.total", mem.Sys) + addUint64("golang.heap.system.obtained", mem.HeapSys) + addUint64("golang.heap.system.stack", mem.StackSys) + addUint64("golang.heap.system.released", mem.HeapReleased) + addUint64("golang.heap.gc.next_gc_limit", mem.NextGC) + addUint64("golang.heap.gc.total_count", uint64(mem.NumGC)) + addUint64("golang.heap.gc.total_pause.ns", mem.PauseTotalNs) + add("golang.heap.gc.cpu_fraction", mem.GCCPUFraction) +} + +func calculateCPUUsage(current, last cpuMetrics) (systemUsage, processUsage float64) { + idleDelta := current.system.Idle + current.system.IOWait - last.system.Idle - last.system.IOWait + systemTotalDelta := current.system.Total() - last.system.Total() + if systemTotalDelta <= 0 { + return 0, 0 + } + + idlePercent := float64(idleDelta) / float64(systemTotalDelta) + systemUsage = 1 - idlePercent + + processTotalDelta := current.process.Total() - last.process.Total() + processUsage = float64(processTotalDelta) / float64(systemTotalDelta) + + return systemUsage, processUsage +} + +type sysMetrics struct { + cpu cpuMetrics + mem memoryMetrics +} + +type cpuMetrics struct { + process types.CPUTimes + system types.CPUTimes +} + +type memoryMetrics struct { + process types.MemoryInfo + system *types.HostMemoryInfo +} + +func gatherSysMetrics() (sysMetrics, error) { + proc, err := sysinfo.Self() + if err != nil { + return sysMetrics{}, err + } + host, err := sysinfo.Host() + if err != nil { + return sysMetrics{}, err + } + hostTimes, err := host.CPUTime() + if err != nil { + return sysMetrics{}, err + } + hostMemory, err := host.Memory() + if err != nil { + return sysMetrics{}, err + } + procTimes, err := proc.CPUTime() + if err != nil { + return sysMetrics{}, err + } + procMemory, err := proc.Memory() + if err != nil { + return sysMetrics{}, err + } + + return sysMetrics{ + cpu: cpuMetrics{ + system: hostTimes, + process: procTimes, + }, + mem: memoryMetrics{ + 
system: hostMemory, + process: procMemory, + }, + }, nil +} diff --git a/vendor/go.elastic.co/apm/capturebody.go b/vendor/go.elastic.co/apm/capturebody.go new file mode 100644 index 00000000000..5e3f402b012 --- /dev/null +++ b/vendor/go.elastic.co/apm/capturebody.go @@ -0,0 +1,198 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +import ( + "bytes" + "io" + "net/http" + "net/url" + "sync" + "unicode/utf8" + + "go.elastic.co/apm/internal/apmstrings" + "go.elastic.co/apm/model" +) + +// CaptureBodyMode holds a value indicating how a tracer should capture +// HTTP request bodies: for transactions, for errors, for both, or neither. +type CaptureBodyMode int + +const ( + // CaptureBodyOff disables capturing of HTTP request bodies. This is + // the default mode. + CaptureBodyOff CaptureBodyMode = 0 + + // CaptureBodyErrors captures HTTP request bodies for only errors. + CaptureBodyErrors CaptureBodyMode = 1 + + // CaptureBodyTransactions captures HTTP request bodies for only + // transactions. + CaptureBodyTransactions CaptureBodyMode = 1 << 1 + + // CaptureBodyAll captures HTTP request bodies for both transactions + // and errors. 
+ CaptureBodyAll CaptureBodyMode = CaptureBodyErrors | CaptureBodyTransactions +) + +var bodyCapturerPool = sync.Pool{ + New: func() interface{} { + return &BodyCapturer{} + }, +} + +// CaptureHTTPRequestBody replaces req.Body and returns a possibly nil +// BodyCapturer which can later be passed to Context.SetHTTPRequestBody +// for setting the request body in a transaction or error context. If the +// tracer is not configured to capture HTTP request bodies, then req.Body +// is left alone and nil is returned. +// +// This must be called before the request body is read. The BodyCapturer's +// Discard method should be called after it is no longer needed, in order +// to recycle its memory. +func (t *Tracer) CaptureHTTPRequestBody(req *http.Request) *BodyCapturer { + if req.Body == nil { + return nil + } + captureBody := t.instrumentationConfig().captureBody + if captureBody == CaptureBodyOff { + return nil + } + + bc := bodyCapturerPool.Get().(*BodyCapturer) + bc.captureBody = captureBody + bc.request = req + bc.originalBody = req.Body + bc.buffer.Reset() + req.Body = bodyCapturerReadCloser{BodyCapturer: bc} + return bc +} + +// bodyCapturerReadCloser implements io.ReadCloser using the embedded BodyCapturer. +type bodyCapturerReadCloser struct { + *BodyCapturer +} + +// Close closes the original body. +func (bc bodyCapturerReadCloser) Close() error { + return bc.originalBody.Close() +} + +// Read reads from the original body, copying into bc.buffer. +func (bc bodyCapturerReadCloser) Read(p []byte) (int, error) { + n, err := bc.originalBody.Read(p) + if n > 0 { + bc.buffer.Write(p[:n]) + } + return n, err +} + +// BodyCapturer is returned by Tracer.CaptureHTTPRequestBody to later be +// passed to Context.SetHTTPRequestBody. +// +// Calling Context.SetHTTPRequestBody will reset req.Body to its original +// value, and invalidates the BodyCapturer. 
+type BodyCapturer struct { + captureBody CaptureBodyMode + + readbuf [bytes.MinRead]byte + buffer limitedBuffer + request *http.Request + originalBody io.ReadCloser +} + +// Discard discards the body capturer: the original request body is +// replaced, and the body capturer is returned to a pool for reuse. +// The BodyCapturer must not be used after calling this. +// +// Discard has no effect if bc is nil. +func (bc *BodyCapturer) Discard() { + if bc == nil { + return + } + bc.request.Body = bc.originalBody + bodyCapturerPool.Put(bc) +} + +func (bc *BodyCapturer) setContext(out *model.RequestBody) bool { + if bc.request.PostForm != nil { + // We must copy the map in case we need to + // sanitize the values. Ideally we should only + // copy if sanitization is necessary, but body + // capture shouldn't typically be enabled so + // we don't currently optimize this. + postForm := make(url.Values, len(bc.request.PostForm)) + for k, v := range bc.request.PostForm { + vcopy := make([]string, len(v)) + for i := range vcopy { + vcopy[i] = truncateString(v[i]) + } + postForm[k] = vcopy + } + out.Form = postForm + return true + } + + body, n := apmstrings.Truncate(bc.buffer.String(), stringLengthLimit) + if n == stringLengthLimit { + // There is at least enough data in the buffer + // to hit the string length limit, so we don't + // need to read from bc.originalBody as well. + out.Raw = body + return true + } + + // Read the remaining body, limiting to the maximum number of bytes + // that could make up the truncation limit. We ignore any errors here, + // and just return whatever we can. 
+ rem := utf8.UTFMax * (stringLengthLimit - n) + for { + buf := bc.readbuf[:] + if rem < bytes.MinRead { + buf = buf[:rem] + } + n, err := bc.originalBody.Read(buf) + if n > 0 { + bc.buffer.Write(buf[:n]) + rem -= n + } + if rem == 0 || err != nil { + break + } + } + body, _ = apmstrings.Truncate(bc.buffer.String(), stringLengthLimit) + out.Raw = body + return body != "" +} + +type limitedBuffer struct { + bytes.Buffer +} + +func (b *limitedBuffer) Write(p []byte) (n int, err error) { + rem := (stringLengthLimit * utf8.UTFMax) - b.Len() + n = len(p) + if n > rem { + p = p[:rem] + } + written, err := b.Buffer.Write(p) + if err != nil { + n = written + } + return n, err +} diff --git a/vendor/go.elastic.co/apm/config.go b/vendor/go.elastic.co/apm/config.go new file mode 100644 index 00000000000..10f86e26bd3 --- /dev/null +++ b/vendor/go.elastic.co/apm/config.go @@ -0,0 +1,448 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package apm + +import ( + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync/atomic" + "time" + "unsafe" + + "github.com/pkg/errors" + + "go.elastic.co/apm/internal/configutil" + "go.elastic.co/apm/internal/wildcard" + "go.elastic.co/apm/model" +) + +const ( + envMetricsInterval = "ELASTIC_APM_METRICS_INTERVAL" + envMaxSpans = "ELASTIC_APM_TRANSACTION_MAX_SPANS" + envTransactionSampleRate = "ELASTIC_APM_TRANSACTION_SAMPLE_RATE" + envSanitizeFieldNames = "ELASTIC_APM_SANITIZE_FIELD_NAMES" + envCaptureHeaders = "ELASTIC_APM_CAPTURE_HEADERS" + envCaptureBody = "ELASTIC_APM_CAPTURE_BODY" + envServiceName = "ELASTIC_APM_SERVICE_NAME" + envServiceVersion = "ELASTIC_APM_SERVICE_VERSION" + envEnvironment = "ELASTIC_APM_ENVIRONMENT" + envSpanFramesMinDuration = "ELASTIC_APM_SPAN_FRAMES_MIN_DURATION" + envActive = "ELASTIC_APM_ACTIVE" + envAPIRequestSize = "ELASTIC_APM_API_REQUEST_SIZE" + envAPIRequestTime = "ELASTIC_APM_API_REQUEST_TIME" + envAPIBufferSize = "ELASTIC_APM_API_BUFFER_SIZE" + envMetricsBufferSize = "ELASTIC_APM_METRICS_BUFFER_SIZE" + envDisableMetrics = "ELASTIC_APM_DISABLE_METRICS" + envGlobalLabels = "ELASTIC_APM_GLOBAL_LABELS" + envStackTraceLimit = "ELASTIC_APM_STACK_TRACE_LIMIT" + envCentralConfig = "ELASTIC_APM_CENTRAL_CONFIG" + envBreakdownMetrics = "ELASTIC_APM_BREAKDOWN_METRICS" + envUseElasticTraceparentHeader = "ELASTIC_APM_USE_ELASTIC_TRACEPARENT_HEADER" + + // NOTE(axw) profiling environment variables are experimental. + // They may be removed in a future minor version without being + // considered a breaking change. 
+ envCPUProfileInterval = "ELASTIC_APM_CPU_PROFILE_INTERVAL" + envCPUProfileDuration = "ELASTIC_APM_CPU_PROFILE_DURATION" + envHeapProfileInterval = "ELASTIC_APM_HEAP_PROFILE_INTERVAL" + + defaultAPIRequestSize = 750 * configutil.KByte + defaultAPIRequestTime = 10 * time.Second + defaultAPIBufferSize = 1 * configutil.MByte + defaultMetricsBufferSize = 750 * configutil.KByte + defaultMetricsInterval = 30 * time.Second + defaultMaxSpans = 500 + defaultCaptureHeaders = true + defaultCaptureBody = CaptureBodyOff + defaultSpanFramesMinDuration = 5 * time.Millisecond + defaultStackTraceLimit = 50 + + minAPIBufferSize = 10 * configutil.KByte + maxAPIBufferSize = 100 * configutil.MByte + minAPIRequestSize = 1 * configutil.KByte + maxAPIRequestSize = 5 * configutil.MByte + minMetricsBufferSize = 10 * configutil.KByte + maxMetricsBufferSize = 100 * configutil.MByte +) + +var ( + defaultSanitizedFieldNames = configutil.ParseWildcardPatterns(strings.Join([]string{ + "password", + "passwd", + "pwd", + "secret", + "*key", + "*token*", + "*session*", + "*credit*", + "*card*", + "authorization", + "set-cookie", + }, ",")) + + globalLabels = func() model.StringMap { + var labels model.StringMap + for _, kv := range configutil.ParseListEnv(envGlobalLabels, ",", nil) { + i := strings.IndexRune(kv, '=') + if i > 0 { + k, v := strings.TrimSpace(kv[:i]), strings.TrimSpace(kv[i+1:]) + labels = append(labels, model.StringMapItem{ + Key: cleanLabelKey(k), + Value: truncateString(v), + }) + } + } + return labels + }() +) + +func initialRequestDuration() (time.Duration, error) { + return configutil.ParseDurationEnv(envAPIRequestTime, defaultAPIRequestTime) +} + +func initialMetricsInterval() (time.Duration, error) { + return configutil.ParseDurationEnv(envMetricsInterval, defaultMetricsInterval) +} + +func initialMetricsBufferSize() (int, error) { + size, err := configutil.ParseSizeEnv(envMetricsBufferSize, defaultMetricsBufferSize) + if err != nil { + return 0, err + } + if size < 
minMetricsBufferSize || size > maxMetricsBufferSize { + return 0, errors.Errorf( + "%s must be at least %s and less than %s, got %s", + envMetricsBufferSize, minMetricsBufferSize, maxMetricsBufferSize, size, + ) + } + return int(size), nil +} + +func initialAPIBufferSize() (int, error) { + size, err := configutil.ParseSizeEnv(envAPIBufferSize, defaultAPIBufferSize) + if err != nil { + return 0, err + } + if size < minAPIBufferSize || size > maxAPIBufferSize { + return 0, errors.Errorf( + "%s must be at least %s and less than %s, got %s", + envAPIBufferSize, minAPIBufferSize, maxAPIBufferSize, size, + ) + } + return int(size), nil +} + +func initialAPIRequestSize() (int, error) { + size, err := configutil.ParseSizeEnv(envAPIRequestSize, defaultAPIRequestSize) + if err != nil { + return 0, err + } + if size < minAPIRequestSize || size > maxAPIRequestSize { + return 0, errors.Errorf( + "%s must be at least %s and less than %s, got %s", + envAPIRequestSize, minAPIRequestSize, maxAPIRequestSize, size, + ) + } + return int(size), nil +} + +func initialMaxSpans() (int, error) { + value := os.Getenv(envMaxSpans) + if value == "" { + return defaultMaxSpans, nil + } + max, err := strconv.Atoi(value) + if err != nil { + return 0, errors.Wrapf(err, "failed to parse %s", envMaxSpans) + } + return max, nil +} + +// initialSampler returns a nil Sampler if all transactions should be sampled. +func initialSampler() (Sampler, error) { + value := os.Getenv(envTransactionSampleRate) + return parseSampleRate(envTransactionSampleRate, value) +} + +// parseSampleRate parses a numeric sampling rate in the range [0,1.0], returning a Sampler. 
+func parseSampleRate(name, value string) (Sampler, error) { + if value == "" { + value = "1" + } + ratio, err := strconv.ParseFloat(value, 64) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse %s", name) + } + if ratio < 0.0 || ratio > 1.0 { + return nil, errors.Errorf( + "invalid value for %s: %s (out of range [0,1.0])", + name, value, + ) + } + return NewRatioSampler(ratio), nil +} + +func initialSanitizedFieldNames() wildcard.Matchers { + return configutil.ParseWildcardPatternsEnv(envSanitizeFieldNames, defaultSanitizedFieldNames) +} + +func initialCaptureHeaders() (bool, error) { + return configutil.ParseBoolEnv(envCaptureHeaders, defaultCaptureHeaders) +} + +func initialCaptureBody() (CaptureBodyMode, error) { + value := os.Getenv(envCaptureBody) + if value == "" { + return defaultCaptureBody, nil + } + return parseCaptureBody(envCaptureBody, value) +} + +func parseCaptureBody(name, value string) (CaptureBodyMode, error) { + switch strings.TrimSpace(strings.ToLower(value)) { + case "all": + return CaptureBodyAll, nil + case "errors": + return CaptureBodyErrors, nil + case "transactions": + return CaptureBodyTransactions, nil + case "off": + return CaptureBodyOff, nil + } + return -1, errors.Errorf("invalid %s value %q", name, value) +} + +func initialService() (name, version, environment string) { + name = os.Getenv(envServiceName) + version = os.Getenv(envServiceVersion) + environment = os.Getenv(envEnvironment) + if name == "" { + name = filepath.Base(os.Args[0]) + if runtime.GOOS == "windows" { + name = strings.TrimSuffix(name, filepath.Ext(name)) + } + } + name = sanitizeServiceName(name) + return name, version, environment +} + +func initialSpanFramesMinDuration() (time.Duration, error) { + return configutil.ParseDurationEnv(envSpanFramesMinDuration, defaultSpanFramesMinDuration) +} + +func initialActive() (bool, error) { + return configutil.ParseBoolEnv(envActive, true) +} + +func initialDisabledMetrics() wildcard.Matchers { + return 
configutil.ParseWildcardPatternsEnv(envDisableMetrics, nil) +} + +func initialStackTraceLimit() (int, error) { + value := os.Getenv(envStackTraceLimit) + if value == "" { + return defaultStackTraceLimit, nil + } + limit, err := strconv.Atoi(value) + if err != nil { + return 0, errors.Wrapf(err, "failed to parse %s", envStackTraceLimit) + } + return limit, nil +} + +func initialCentralConfigEnabled() (bool, error) { + return configutil.ParseBoolEnv(envCentralConfig, true) +} + +func initialBreakdownMetricsEnabled() (bool, error) { + return configutil.ParseBoolEnv(envBreakdownMetrics, true) +} + +func initialUseElasticTraceparentHeader() (bool, error) { + return configutil.ParseBoolEnv(envUseElasticTraceparentHeader, true) +} + +func initialCPUProfileIntervalDuration() (time.Duration, time.Duration, error) { + interval, err := configutil.ParseDurationEnv(envCPUProfileInterval, 0) + if err != nil || interval <= 0 { + return 0, 0, err + } + duration, err := configutil.ParseDurationEnv(envCPUProfileDuration, 0) + if err != nil || duration <= 0 { + return 0, 0, err + } + return interval, duration, nil +} + +func initialHeapProfileInterval() (time.Duration, error) { + return configutil.ParseDurationEnv(envHeapProfileInterval, 0) +} + +// updateRemoteConfig updates t and cfg with changes held in "attrs", and reverts to local +// config for config attributes that have been removed (exist in old but not in attrs). +// +// On return from updateRemoteConfig, unapplied config will have been removed from attrs. 
+func (t *Tracer) updateRemoteConfig(logger WarningLogger, old, attrs map[string]string) { + warningf := func(string, ...interface{}) {} + debugf := func(string, ...interface{}) {} + errorf := func(string, ...interface{}) {} + if logger != nil { + warningf = logger.Warningf + debugf = logger.Debugf + errorf = logger.Errorf + } + envName := func(k string) string { + return "ELASTIC_APM_" + strings.ToUpper(k) + } + + var updates []func(cfg *instrumentationConfig) + for k, v := range attrs { + if oldv, ok := old[k]; ok && oldv == v { + continue + } + switch envName(k) { + case envCaptureBody: + value, err := parseCaptureBody(k, v) + if err != nil { + errorf("central config failure: %s", err) + delete(attrs, k) + continue + } else { + updates = append(updates, func(cfg *instrumentationConfig) { + cfg.captureBody = value + }) + } + case envMaxSpans: + value, err := strconv.Atoi(v) + if err != nil { + errorf("central config failure: failed to parse %s: %s", k, err) + delete(attrs, k) + continue + } else { + updates = append(updates, func(cfg *instrumentationConfig) { + cfg.maxSpans = value + }) + } + case envTransactionSampleRate: + sampler, err := parseSampleRate(k, v) + if err != nil { + errorf("central config failure: %s", err) + delete(attrs, k) + continue + } else { + updates = append(updates, func(cfg *instrumentationConfig) { + cfg.sampler = sampler + }) + } + default: + warningf("central config failure: unsupported config: %s", k) + delete(attrs, k) + continue + } + debugf("central config update: updated %s to %s", k, v) + } + for k := range old { + if _, ok := attrs[k]; ok { + continue + } + updates = append(updates, func(cfg *instrumentationConfig) { + if f, ok := cfg.local[envName(k)]; ok { + f(&cfg.instrumentationConfigValues) + } + }) + debugf("central config update: reverted %s to local config", k) + } + if updates != nil { + remote := make(map[string]struct{}) + for k := range attrs { + remote[envName(k)] = struct{}{} + } + 
t.updateInstrumentationConfig(func(cfg *instrumentationConfig) { + cfg.remote = remote + for _, update := range updates { + update(cfg) + } + }) + } +} + +// instrumentationConfig returns the current instrumentationConfig. +// +// The returned value is immutable. +func (t *Tracer) instrumentationConfig() *instrumentationConfig { + config := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&t.instrumentationConfigInternal))) + return (*instrumentationConfig)(config) +} + +// setLocalInstrumentationConfig sets local transaction configuration with +// the specified environment variable key. +func (t *Tracer) setLocalInstrumentationConfig(envKey string, f func(cfg *instrumentationConfigValues)) { + t.updateInstrumentationConfig(func(cfg *instrumentationConfig) { + cfg.local[envKey] = f + if _, ok := cfg.remote[envKey]; !ok { + f(&cfg.instrumentationConfigValues) + } + }) +} + +func (t *Tracer) updateInstrumentationConfig(f func(cfg *instrumentationConfig)) { + for { + oldConfig := t.instrumentationConfig() + newConfig := *oldConfig + f(&newConfig) + if atomic.CompareAndSwapPointer( + (*unsafe.Pointer)(unsafe.Pointer(&t.instrumentationConfigInternal)), + unsafe.Pointer(oldConfig), + unsafe.Pointer(&newConfig), + ) { + return + } + } +} + +// instrumentationConfig holds current configuration values, as well as information +// required to revert from remote to local configuration. +type instrumentationConfig struct { + instrumentationConfigValues + + // local holds functions for setting instrumentationConfigValues to the most + // recently, locally specified configuration. + local map[string]func(*instrumentationConfigValues) + + // remote holds the environment variable keys for applied remote config. + remote map[string]struct{} +} + +// instrumentationConfigValues holds configuration that is accessible outside of the +// tracer loop, for instrumentation: StartTransaction, StartSpan, CaptureError, etc. 
+// +// NOTE(axw) when adding configuration here, you must also update `newTracer` to +// set the initial entry in instrumentationConfig.local, in order to properly reset +// to the local value, even if the default is the zero value. +type instrumentationConfigValues struct { + captureBody CaptureBodyMode + captureHeaders bool + maxSpans int + sampler Sampler + spanFramesMinDuration time.Duration + stackTraceLimit int + propagateLegacyHeader bool +} diff --git a/vendor/go.elastic.co/apm/context.go b/vendor/go.elastic.co/apm/context.go new file mode 100644 index 00000000000..9ab5e93f88a --- /dev/null +++ b/vendor/go.elastic.co/apm/context.go @@ -0,0 +1,256 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +import ( + "fmt" + "net/http" + + "go.elastic.co/apm/internal/apmhttputil" + "go.elastic.co/apm/model" +) + +// Context provides methods for setting transaction and error context. +// +// NOTE this is entirely unrelated to the standard library's context.Context. 
+type Context struct { + model model.Context + request model.Request + requestBody model.RequestBody + requestSocket model.RequestSocket + response model.Response + user model.User + service model.Service + serviceFramework model.Framework + captureHeaders bool + captureBodyMask CaptureBodyMode +} + +func (c *Context) build() *model.Context { + switch { + case c.model.Request != nil: + case c.model.Response != nil: + case c.model.User != nil: + case c.model.Service != nil: + case len(c.model.Tags) != 0: + case len(c.model.Custom) != 0: + default: + return nil + } + return &c.model +} + +func (c *Context) reset() { + *c = Context{ + model: model.Context{ + Custom: c.model.Custom[:0], + Tags: c.model.Tags[:0], + }, + captureBodyMask: c.captureBodyMask, + request: model.Request{ + Headers: c.request.Headers[:0], + }, + response: model.Response{ + Headers: c.response.Headers[:0], + }, + } +} + +// SetTag calls SetLabel(key, value). +// +// SetTag is deprecated, and will be removed in a future major version. +func (c *Context) SetTag(key, value string) { + c.SetLabel(key, value) +} + +// SetLabel sets a label in the context. +// +// Invalid characters ('.', '*', and '"') in the key will be replaced with +// underscores. +// +// If the value is numerical or boolean, then it will be sent to the server +// as a JSON number or boolean; otherwise it will converted to a string, using +// `fmt.Sprint` if necessary. String values longer than 1024 characters will +// be truncated. +func (c *Context) SetLabel(key string, value interface{}) { + // Note that we do not attempt to de-duplicate the keys. + // This is OK, since json.Unmarshal will always take the + // final instance. + c.model.Tags = append(c.model.Tags, model.IfaceMapItem{ + Key: cleanLabelKey(key), + Value: makeLabelValue(value), + }) +} + +// SetCustom sets custom context. +// +// Invalid characters ('.', '*', and '"') in the key will be +// replaced with an underscore. The value may be any JSON-encodable +// value. 
+func (c *Context) SetCustom(key string, value interface{}) { + // Note that we do not attempt to de-duplicate the keys. + // This is OK, since json.Unmarshal will always take the + // final instance. + c.model.Custom = append(c.model.Custom, model.IfaceMapItem{ + Key: cleanLabelKey(key), + Value: value, + }) +} + +// SetFramework sets the framework name and version in the context. +// +// This is used for identifying the framework in which the context +// was created, such as Gin or Echo. +// +// If the name is empty, this is a no-op. If version is empty, then +// it will be set to "unspecified". +func (c *Context) SetFramework(name, version string) { + if name == "" { + return + } + if version == "" { + // Framework version is required. + version = "unspecified" + } + c.serviceFramework = model.Framework{ + Name: truncateString(name), + Version: truncateString(version), + } + c.service.Framework = &c.serviceFramework + c.model.Service = &c.service +} + +// SetHTTPRequest sets details of the HTTP request in the context. +// +// This function relates to server-side requests. Various proxy +// forwarding headers are taken into account to reconstruct the URL, +// and determining the client address. +// +// If the request URL contains user info, it will be removed and +// excluded from the URL's "full" field. +// +// If the request contains HTTP Basic Authentication, the username +// from that will be recorded in the context. Otherwise, if the +// request contains user info in the URL (i.e. a client-side URL), +// that will be used. +func (c *Context) SetHTTPRequest(req *http.Request) { + // Special cases to avoid calling into fmt.Sprintf in most cases. 
+ var httpVersion string + switch { + case req.ProtoMajor == 1 && req.ProtoMinor == 1: + httpVersion = "1.1" + case req.ProtoMajor == 2 && req.ProtoMinor == 0: + httpVersion = "2.0" + default: + httpVersion = fmt.Sprintf("%d.%d", req.ProtoMajor, req.ProtoMinor) + } + + c.request = model.Request{ + Body: c.request.Body, + URL: apmhttputil.RequestURL(req), + Method: truncateString(req.Method), + HTTPVersion: httpVersion, + Cookies: req.Cookies(), + } + c.model.Request = &c.request + + if c.captureHeaders { + for k, values := range req.Header { + if k == "Cookie" { + // We capture cookies in the request structure. + continue + } + c.request.Headers = append(c.request.Headers, model.Header{ + Key: k, Values: values, + }) + } + } + + c.requestSocket = model.RequestSocket{ + Encrypted: req.TLS != nil, + RemoteAddress: apmhttputil.RemoteAddr(req), + } + if c.requestSocket != (model.RequestSocket{}) { + c.request.Socket = &c.requestSocket + } + + username, _, ok := req.BasicAuth() + if !ok && req.URL.User != nil { + username = req.URL.User.Username() + } + c.user.Username = truncateString(username) + if c.user.Username != "" { + c.model.User = &c.user + } +} + +// SetHTTPRequestBody sets the request body in context given a (possibly nil) +// BodyCapturer returned by Tracer.CaptureHTTPRequestBody. +func (c *Context) SetHTTPRequestBody(bc *BodyCapturer) { + if bc == nil || bc.captureBody&c.captureBodyMask == 0 { + return + } + if bc.setContext(&c.requestBody) { + c.request.Body = &c.requestBody + } +} + +// SetHTTPResponseHeaders sets the HTTP response headers in the context. +func (c *Context) SetHTTPResponseHeaders(h http.Header) { + if !c.captureHeaders { + return + } + for k, values := range h { + c.response.Headers = append(c.response.Headers, model.Header{ + Key: k, Values: values, + }) + } + if len(c.response.Headers) != 0 { + c.model.Response = &c.response + } +} + +// SetHTTPStatusCode records the HTTP response status code. 
+func (c *Context) SetHTTPStatusCode(statusCode int) { + c.response.StatusCode = statusCode + c.model.Response = &c.response +} + +// SetUserID sets the ID of the authenticated user. +func (c *Context) SetUserID(id string) { + c.user.ID = truncateString(id) + if c.user.ID != "" { + c.model.User = &c.user + } +} + +// SetUserEmail sets the email for the authenticated user. +func (c *Context) SetUserEmail(email string) { + c.user.Email = truncateString(email) + if c.user.Email != "" { + c.model.User = &c.user + } +} + +// SetUsername sets the username of the authenticated user. +func (c *Context) SetUsername(username string) { + c.user.Username = truncateString(username) + if c.user.Username != "" { + c.model.User = &c.user + } +} diff --git a/vendor/go.elastic.co/apm/doc.go b/vendor/go.elastic.co/apm/doc.go new file mode 100644 index 00000000000..6ca1ac8b26a --- /dev/null +++ b/vendor/go.elastic.co/apm/doc.go @@ -0,0 +1,21 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package apm provides an API for tracing +// transactions and capturing errors, sending the +// data to Elastic APM. 
+package apm // import "go.elastic.co/apm" diff --git a/vendor/go.elastic.co/apm/error.go b/vendor/go.elastic.co/apm/error.go new file mode 100644 index 00000000000..fcfd1b66543 --- /dev/null +++ b/vendor/go.elastic.co/apm/error.go @@ -0,0 +1,696 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +import ( + "crypto/rand" + "fmt" + "net" + "os" + "reflect" + "syscall" + "time" + + "github.com/pkg/errors" + + "go.elastic.co/apm/internal/pkgerrorsutil" + "go.elastic.co/apm/model" + "go.elastic.co/apm/stacktrace" +) + +const ( + // maxErrorGraphSize is the maximum number of errors + // to report in an error tree. Once this number of + // nodes is reached, we will stop recursing through + // error causes. + maxErrorTreeNodes = 50 +) + +// Recovered creates an Error with t.NewError(err), where +// err is either v (if v implements error), or otherwise +// fmt.Errorf("%v", v). The value v is expected to have +// come from a panic. +func (t *Tracer) Recovered(v interface{}) *Error { + var e *Error + switch v := v.(type) { + case error: + e = t.NewError(v) + default: + e = t.NewError(fmt.Errorf("%v", v)) + } + return e +} + +// NewError returns a new Error with details taken from err. 
+// NewError will panic if called with a nil error. +// +// The exception message will be set to err.Error(). +// The exception module and type will be set to the package +// and type name of the cause of the error, respectively, +// where the cause has the same definition as given by +// github.com/pkg/errors. +// +// If err implements +// type interface { +// StackTrace() github.com/pkg/errors.StackTrace +// } +// or +// type interface { +// StackTrace() []stacktrace.Frame +// } +// then one of those will be used to set the error +// stacktrace. Otherwise, NewError will take a stacktrace. +// +// If err implements +// type interface {Type() string} +// then that will be used to set the error type. +// +// If err implements +// type interface {Code() string} +// or +// type interface {Code() float64} +// then one of those will be used to set the error code. +func (t *Tracer) NewError(err error) *Error { + if err == nil { + panic("NewError must be called with a non-nil error") + } + e := t.newError() + e.cause = err + e.err = err.Error() + rand.Read(e.ID[:]) // ignore error, can't do anything about it + initException(&e.exception, err, e.stackTraceLimit) + if len(e.exception.stacktrace) == 0 { + e.SetStacktrace(2) + } + return e +} + +// NewErrorLog returns a new Error for the given ErrorLogRecord. +// +// The resulting Error's stacktrace will not be set. Call the +// SetStacktrace method to set it, if desired. +// +// If r.Message is empty, "[EMPTY]" will be used. 
+func (t *Tracer) NewErrorLog(r ErrorLogRecord) *Error { + e := t.newError() + e.log = ErrorLogRecord{ + Message: truncateString(r.Message), + MessageFormat: truncateString(r.MessageFormat), + Level: truncateString(r.Level), + LoggerName: truncateString(r.LoggerName), + } + if e.log.Message == "" { + e.log.Message = "[EMPTY]" + } + e.cause = r.Error + e.err = e.log.Message + rand.Read(e.ID[:]) // ignore error, can't do anything about it + if r.Error != nil { + initException(&e.exception, r.Error, e.stackTraceLimit) + } + return e +} + +// newError returns a new Error associated with the Tracer. +func (t *Tracer) newError() *Error { + e, _ := t.errorDataPool.Get().(*ErrorData) + if e == nil { + e = &ErrorData{ + tracer: t, + Context: Context{ + captureBodyMask: CaptureBodyErrors, + }, + } + } + e.Timestamp = time.Now() + + instrumentationConfig := t.instrumentationConfig() + e.Context.captureHeaders = instrumentationConfig.captureHeaders + e.stackTraceLimit = instrumentationConfig.stackTraceLimit + + return &Error{ErrorData: e} +} + +// Error describes an error occurring in the monitored service. +type Error struct { + // ErrorData holds the error data. This field is set to nil when + // the error's Send method is called. + *ErrorData + + // cause holds the original error. + // + // It is accessible via the Cause method: + // https://godoc.org/github.com/pkg/errors#Cause + cause error + + // string holds original error string + err string +} + +// ErrorData holds the details for an error, and is embedded inside Error. +// When the error is sent, its ErrorData field will be set to nil. +type ErrorData struct { + tracer *Tracer + stackTraceLimit int + exception exceptionData + log ErrorLogRecord + logStacktrace []stacktrace.Frame + transactionSampled bool + transactionType string + + // ID is the unique identifier of the error. This is set by + // the various error constructors, and is exposed only so + // the error ID can be logged or displayed to the user. 
+ ID ErrorID + + // TraceID is the unique identifier of the trace in which + // this error occurred. If the error is not associated with + // a trace, this will be the zero value. + TraceID TraceID + + // TransactionID is the unique identifier of the transaction + // in which this error occurred. If the error is not associated + // with a transaction, this will be the zero value. + TransactionID SpanID + + // ParentID is the unique identifier of the transaction or span + // in which this error occurred. If the error is not associated + // with a transaction or span, this will be the zero value. + ParentID SpanID + + // Culprit is the name of the function that caused the error. + // + // This is initially unset; if it remains unset by the time + // Send is invoked, and the error has a stacktrace, the first + // non-library frame in the stacktrace will be considered the + // culprit. + Culprit string + + // Timestamp records the time at which the error occurred. + // This is set when the Error object is created, but may + // be overridden any time before the Send method is called. + Timestamp time.Time + + // Handled records whether or not the error was handled. This + // is ignored by "log" errors with no associated error value. + Handled bool + + // Context holds the context for this error. + Context Context +} + +// Cause returns original error assigned to Error, nil if Error or Error.cause is nil. +// https://godoc.org/github.com/pkg/errors#Cause +func (e *Error) Cause() error { + if e != nil { + return e.cause + } + return nil +} + +// Error returns string message for error. +// if Error or Error.cause is nil, "[EMPTY]" will be used. +func (e *Error) Error() string { + if e != nil { + return e.err + } + return "[EMPTY]" +} + +// SetTransaction sets TraceID, TransactionID, and ParentID to the transaction's +// IDs, and records the transaction's Type and whether or not it was sampled. 
+// +// If any custom context has been recorded in tx, it will also be carried across +// to e, but will not override any custom context already recorded on e. +func (e *Error) SetTransaction(tx *Transaction) { + tx.mu.RLock() + traceContext := tx.traceContext + var txType string + var custom model.IfaceMap + if !tx.ended() { + txType = tx.Type + custom = tx.Context.model.Custom + } + tx.mu.RUnlock() + e.setSpanData(traceContext, traceContext.Span, txType, custom) +} + +// SetSpan sets TraceID, TransactionID, and ParentID to the span's IDs. +// +// There is no need to call both SetTransaction and SetSpan. If you do call +// both, then SetSpan must be called second in order to set the error's +// ParentID correctly. +// +// If any custom context has been recorded in s's transaction, it will +// also be carried across to e, but will not override any custom context +// already recorded on e. +func (e *Error) SetSpan(s *Span) { + var txType string + var custom model.IfaceMap + if s.tx != nil { + s.tx.mu.RLock() + if !s.tx.ended() { + txType = s.tx.Type + custom = s.tx.Context.model.Custom + } + s.tx.mu.RUnlock() + } + e.setSpanData(s.traceContext, s.transactionID, txType, custom) +} + +func (e *Error) setSpanData( + traceContext TraceContext, + transactionID SpanID, + transactionType string, + customContext model.IfaceMap, +) { + e.TraceID = traceContext.Trace + e.ParentID = traceContext.Span + e.TransactionID = transactionID + e.transactionSampled = traceContext.Options.Recorded() + if e.transactionSampled { + e.transactionType = transactionType + } + if n := len(customContext); n != 0 { + m := len(e.Context.model.Custom) + e.Context.model.Custom = append(e.Context.model.Custom, customContext...) + // If there was already custom context in e, shift the custom context from + // tx to the beginning of the slice so that e's context takes precedence. 
+ if m != 0 { + copy(e.Context.model.Custom[n:], e.Context.model.Custom[:m]) + copy(e.Context.model.Custom[:n], customContext) + } + } +} + +// Send enqueues the error for sending to the Elastic APM server. +// +// Send will set e.ErrorData to nil, so the error must not be +// modified after Send returns. +func (e *Error) Send() { + if e == nil || e.sent() { + return + } + e.ErrorData.enqueue() + e.ErrorData = nil +} + +func (e *Error) sent() bool { + return e.ErrorData == nil +} + +func (e *ErrorData) enqueue() { + select { + case e.tracer.events <- tracerEvent{eventType: errorEvent, err: e}: + default: + // Enqueuing an error should never block. + e.tracer.statsMu.Lock() + e.tracer.stats.ErrorsDropped++ + e.tracer.statsMu.Unlock() + e.reset() + } +} + +func (e *ErrorData) reset() { + *e = ErrorData{ + tracer: e.tracer, + logStacktrace: e.logStacktrace[:0], + Context: e.Context, + exception: e.exception, + } + e.Context.reset() + e.exception.reset() + e.tracer.errorDataPool.Put(e) +} + +type exceptionData struct { + message string + stacktrace []stacktrace.Frame + cause []exceptionData + ErrorDetails +} + +func (e *exceptionData) reset() { + *e = exceptionData{ + cause: e.cause[:0], + stacktrace: e.stacktrace[:0], + ErrorDetails: ErrorDetails{ + attrs: e.ErrorDetails.attrs, + Cause: e.ErrorDetails.Cause[:0], + }, + } + for k := range e.attrs { + delete(e.attrs, k) + } +} + +func initException(e *exceptionData, err error, stackTraceLimit int) { + b := exceptionDataBuilder{stackTraceLimit: stackTraceLimit} + b.init(e, err) +} + +type exceptionDataBuilder struct { + stackTraceLimit int + errorCount int + pointerErrors map[uintptr]struct{} +} + +func (b *exceptionDataBuilder) init(e *exceptionData, err error) bool { + b.errorCount++ + reflectValue := reflect.ValueOf(err) + reflectType := reflectValue.Type() + switch reflectType.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + // Prevent infinite recursion 
due to cyclic error causes. + ptrVal := reflectValue.Pointer() + if b.pointerErrors == nil { + b.pointerErrors = map[uintptr]struct{}{ptrVal: struct{}{}} + } else { + if _, ok := b.pointerErrors[ptrVal]; ok { + return false + } + b.pointerErrors[ptrVal] = struct{}{} + } + } + + e.message = truncateString(err.Error()) + if e.message == "" { + e.message = "[EMPTY]" + } + + namedType := reflectType + if reflectType.Name() == "" && reflectType.Kind() == reflect.Ptr { + namedType = reflectType.Elem() + } + e.Type.Name = namedType.Name() + e.Type.PackagePath = namedType.PkgPath() + + // If the error implements Type, use that to + // override the type name determined through + // reflection. + if err, ok := err.(interface { + Type() string + }); ok { + e.Type.Name = err.Type() + } + + // If the error implements a Code method, use + // that to set the exception code. + switch err := err.(type) { + case interface { + Code() string + }: + e.Code.String = err.Code() + case interface { + Code() float64 + }: + e.Code.Number = err.Code() + } + + // If the error implements an Unwrap or Cause method, use that to set the cause error. + // Unwrap is defined by errors wrapped using fmt.Errorf, while Cause is defined by + // errors wrapped using pkg/errors.Wrap. + switch err := err.(type) { + case interface{ Unwrap() error }: + if cause := err.Unwrap(); cause != nil { + e.ErrorDetails.Cause = append(e.ErrorDetails.Cause, cause) + } + case interface{ Cause() error }: + if cause := err.Cause(); cause != nil { + e.ErrorDetails.Cause = append(e.ErrorDetails.Cause, cause) + } + } + + // Run registered ErrorDetailers over the error. 
+ for _, ed := range typeErrorDetailers[reflectType] { + ed.ErrorDetails(err, &e.ErrorDetails) + } + for _, ed := range errorDetailers { + ed.ErrorDetails(err, &e.ErrorDetails) + } + + e.Code.String = truncateString(e.Code.String) + e.Type.Name = truncateString(e.Type.Name) + e.Type.PackagePath = truncateString(e.Type.PackagePath) + b.initErrorStacktrace(&e.stacktrace, err) + + for _, err := range e.ErrorDetails.Cause { + if b.errorCount >= maxErrorTreeNodes { + break + } + var data exceptionData + if b.init(&data, err) { + e.cause = append(e.cause, data) + } + } + return true +} + +func (b *exceptionDataBuilder) initErrorStacktrace(out *[]stacktrace.Frame, err error) { + type internalStackTracer interface { + StackTrace() []stacktrace.Frame + } + type errorsStackTracer interface { + StackTrace() errors.StackTrace + } + switch stackTracer := err.(type) { + case internalStackTracer: + stackTrace := stackTracer.StackTrace() + if b.stackTraceLimit >= 0 && len(stackTrace) > b.stackTraceLimit { + stackTrace = stackTrace[:b.stackTraceLimit] + } + *out = append(*out, stackTrace...) + case errorsStackTracer: + stackTrace := stackTracer.StackTrace() + pkgerrorsutil.AppendStacktrace(stackTrace, out, b.stackTraceLimit) + } +} + +// SetStacktrace sets the stacktrace for the error, +// skipping the first skip number of frames, excluding +// the SetStacktrace function. +func (e *Error) SetStacktrace(skip int) { + out := &e.exception.stacktrace + if e.log.Message != "" { + out = &e.logStacktrace + } + *out = stacktrace.AppendStacktrace((*out)[:0], skip+1, e.stackTraceLimit) +} + +// ErrorLogRecord holds details of an error log record. +type ErrorLogRecord struct { + // Message holds the message for the log record, + // e.g. "failed to connect to %s". + // + // If this is empty, "[EMPTY]" will be used. + Message string + + // MessageFormat holds the non-interpolated format + // of the log record, e.g. "failed to connect to %s". + // + // This is optional. 
+ MessageFormat string + + // Level holds the severity level of the log record. + // + // This is optional. + Level string + + // LoggerName holds the name of the logger used. + // + // This is optional. + LoggerName string + + // Error is an error associated with the log record. + // + // This is optional. + Error error +} + +// ErrorID uniquely identifies an error. +type ErrorID TraceID + +// String returns id in its hex-encoded format. +func (id ErrorID) String() string { + return TraceID(id).String() +} + +func init() { + RegisterErrorDetailer(ErrorDetailerFunc(func(err error, details *ErrorDetails) { + if errTemporary(err) { + details.SetAttr("temporary", true) + } + if errTimeout(err) { + details.SetAttr("timeout", true) + } + })) + RegisterTypeErrorDetailer(reflect.TypeOf(&net.OpError{}), ErrorDetailerFunc(func(err error, details *ErrorDetails) { + opErr := err.(*net.OpError) + details.SetAttr("op", opErr.Op) + details.SetAttr("net", opErr.Net) + if opErr.Source != nil { + if addr := opErr.Source; addr != nil { + details.SetAttr("source", fmt.Sprintf("%s:%s", addr.Network(), addr.String())) + } + } + if opErr.Addr != nil { + if addr := opErr.Addr; addr != nil { + details.SetAttr("addr", fmt.Sprintf("%s:%s", addr.Network(), addr.String())) + } + } + details.Cause = append(details.Cause, opErr.Err) + })) + RegisterTypeErrorDetailer(reflect.TypeOf(&os.LinkError{}), ErrorDetailerFunc(func(err error, details *ErrorDetails) { + linkErr := err.(*os.LinkError) + details.SetAttr("op", linkErr.Op) + details.SetAttr("old", linkErr.Old) + details.SetAttr("new", linkErr.New) + details.Cause = append(details.Cause, linkErr.Err) + })) + RegisterTypeErrorDetailer(reflect.TypeOf(&os.PathError{}), ErrorDetailerFunc(func(err error, details *ErrorDetails) { + pathErr := err.(*os.PathError) + details.SetAttr("op", pathErr.Op) + details.SetAttr("path", pathErr.Path) + details.Cause = append(details.Cause, pathErr.Err) + })) + 
RegisterTypeErrorDetailer(reflect.TypeOf(&os.SyscallError{}), ErrorDetailerFunc(func(err error, details *ErrorDetails) { + syscallErr := err.(*os.SyscallError) + details.SetAttr("syscall", syscallErr.Syscall) + details.Cause = append(details.Cause, syscallErr.Err) + })) + RegisterTypeErrorDetailer(reflect.TypeOf(syscall.Errno(0)), ErrorDetailerFunc(func(err error, details *ErrorDetails) { + errno := err.(syscall.Errno) + details.Code.String = errnoName(errno) + if details.Code.String == "" { + details.Code.Number = float64(errno) + } + })) +} + +func errTemporary(err error) bool { + type temporaryError interface { + Temporary() bool + } + terr, ok := err.(temporaryError) + return ok && terr.Temporary() +} + +func errTimeout(err error) bool { + type timeoutError interface { + Timeout() bool + } + terr, ok := err.(timeoutError) + return ok && terr.Timeout() +} + +// RegisterTypeErrorDetailer registers e to be called for any error with +// the concrete type t. +// +// Each ErrorDetailer registered in this way will be called, in the order +// registered, for each error of type t created via Tracer.NewError or +// Tracer.NewErrorLog. +// +// RegisterTypeErrorDetailer must not be called during tracer operation; +// it is intended to be called at package init time. +func RegisterTypeErrorDetailer(t reflect.Type, e ErrorDetailer) { + typeErrorDetailers[t] = append(typeErrorDetailers[t], e) +} + +// RegisterErrorDetailer registers e in the global list of ErrorDetailers. +// +// Each ErrorDetailer registered in this way will be called, in the order +// registered, for each error created via Tracer.NewError or Tracer.NewErrorLog. +// +// RegisterErrorDetailer must not be called during tracer operation; it is +// intended to be called at package init time. 
+func RegisterErrorDetailer(e ErrorDetailer) { + errorDetailers = append(errorDetailers, e) +} + +var ( + typeErrorDetailers = make(map[reflect.Type][]ErrorDetailer) + errorDetailers []ErrorDetailer +) + +// ErrorDetails holds details of an error, which can be altered or +// extended by registering an ErrorDetailer with RegisterErrorDetailer +// or RegisterTypeErrorDetailer. +type ErrorDetails struct { + attrs map[string]interface{} + + // Type holds information about the error type, initialized + // with the type name and type package path using reflection. + Type struct { + // Name holds the error type name. + Name string + + // PackagePath holds the error type package path. + PackagePath string + } + + // Code holds an error code. + Code struct { + // String holds a string-based error code. If this is set, then Number is ignored. + // + // This field will be initialized to the result of calling an error's Code method, + // if the error implements the following interface: + // + // type interface StringCoder { + // Code() string + // } + String string + + // Number holds a numerical error code. This is ignored if String is set. + // + // This field will be initialized to the result of calling an error's Code + // method, if the error implements the following interface: + // + // type interface NumberCoder { + // Code() float64 + // } + Number float64 + } + + // Cause holds the errors that were the cause of this error. + Cause []error +} + +// SetAttr sets the attribute with key k to value v. +func (d *ErrorDetails) SetAttr(k string, v interface{}) { + if d.attrs == nil { + d.attrs = make(map[string]interface{}) + } + d.attrs[k] = v +} + +// ErrorDetailer defines an interface for altering or extending the ErrorDetails for an error. +// +// ErrorDetailers can be registered using the package-level functions RegisterErrorDetailer and +// RegisterTypeErrorDetailer. +type ErrorDetailer interface { + // ErrorDetails is called to update or alter details for err. 
+ ErrorDetails(err error, details *ErrorDetails) +} + +// ErrorDetailerFunc is a function type implementing ErrorDetailer. +type ErrorDetailerFunc func(error, *ErrorDetails) + +// ErrorDetails calls f(err, details). +func (f ErrorDetailerFunc) ErrorDetails(err error, details *ErrorDetails) { + f(err, details) +} diff --git a/vendor/go.elastic.co/apm/error_unix.go b/vendor/go.elastic.co/apm/error_unix.go new file mode 100644 index 00000000000..e54f301612d --- /dev/null +++ b/vendor/go.elastic.co/apm/error_unix.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build !windows + +package apm + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +func errnoName(err syscall.Errno) string { + return unix.ErrnoName(err) +} diff --git a/vendor/go.elastic.co/apm/error_windows.go b/vendor/go.elastic.co/apm/error_windows.go new file mode 100644 index 00000000000..e95ac0f248d --- /dev/null +++ b/vendor/go.elastic.co/apm/error_windows.go @@ -0,0 +1,27 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +import ( + "syscall" +) + +func errnoName(err syscall.Errno) string { + // There's currently no equivalent of unix.ErrnoName for Windows. + return "" +} diff --git a/vendor/go.elastic.co/apm/fmt.go b/vendor/go.elastic.co/apm/fmt.go new file mode 100644 index 00000000000..4d1ce03577b --- /dev/null +++ b/vendor/go.elastic.co/apm/fmt.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +import ( + "context" + "fmt" + "io" +) + +// TraceFormatter returns a fmt.Formatter that can be used to +// format the identifiers of the transaction and span in ctx. 
+// +// The returned Formatter understands the following verbs: +// +// %v: trace ID, transaction ID, and span ID (if existing), space-separated +// the plus flag (%+v) adds field names, e.g. "trace.id=... transaction.id=..." +// %t: trace ID (hex-encoded, or empty string if non-existent) +// the plus flag (%+t) adds the field name, e.g. "trace.id=..." +// %x: transaction ID (hex-encoded, or empty string if non-existent) +// the plus flag (%+x) adds the field name, e.g. "transaction.id=..." +// %s: span ID (hex-encoded, or empty string if non-existent) +// the plus flag (%+s) adds the field name, e.g. "span.id=..." +func TraceFormatter(ctx context.Context) fmt.Formatter { + f := traceFormatter{tx: TransactionFromContext(ctx)} + if f.tx != nil { + f.span = SpanFromContext(ctx) + } + return f +} + +type traceFormatter struct { + tx *Transaction + span *Span +} + +func (t traceFormatter) Format(f fmt.State, c rune) { + switch c { + case 'v': + if t.tx != nil { + t.writeField(f, "trace.id", t.tx.TraceContext().Trace.String()) + f.Write([]byte{' '}) + t.writeField(f, "transaction.id", t.tx.TraceContext().Span.String()) + if t.span != nil { + f.Write([]byte{' '}) + t.writeField(f, "span.id", t.span.TraceContext().Span.String()) + } + } + case 't': + if t.tx != nil { + t.writeField(f, "trace.id", t.tx.TraceContext().Trace.String()) + } + case 'x': + if t.tx != nil { + t.writeField(f, "transaction.id", t.tx.TraceContext().Span.String()) + } + case 's': + if t.span != nil { + t.writeField(f, "span.id", t.span.TraceContext().Span.String()) + } + } +} + +func (t traceFormatter) writeField(f fmt.State, name, value string) { + if f.Flag('+') { + io.WriteString(f, name) + f.Write([]byte{'='}) + } + io.WriteString(f, value) +} diff --git a/vendor/go.elastic.co/apm/fnv.go b/vendor/go.elastic.co/apm/fnv.go new file mode 100644 index 00000000000..0741e224f26 --- /dev/null +++ b/vendor/go.elastic.co/apm/fnv.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V.
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Based on Go's pkg/hash/fnv. +// +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package apm + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +type fnv1a uint64 + +func newFnv1a() fnv1a { + return offset64 +} + +func (f *fnv1a) add(s string) { + for i := 0; i < len(s); i++ { + *f ^= fnv1a(s[i]) + *f *= prime64 + } +} diff --git a/vendor/go.elastic.co/apm/go.mod b/vendor/go.elastic.co/apm/go.mod new file mode 100644 index 00000000000..c7f1c4bbe5f --- /dev/null +++ b/vendor/go.elastic.co/apm/go.mod @@ -0,0 +1,17 @@ +module go.elastic.co/apm + +require ( + github.com/armon/go-radix v1.0.0 + github.com/cucumber/godog v0.8.1 + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/elastic/go-sysinfo v1.1.1 + github.com/google/go-cmp v0.3.1 + github.com/pkg/errors v0.8.1 + github.com/prometheus/procfs v0.0.3 // indirect + github.com/santhosh-tekuri/jsonschema v1.2.4 + github.com/stretchr/testify v1.4.0 + go.elastic.co/fastjson v1.0.0 + golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e +) + 
+go 1.13 diff --git a/vendor/go.elastic.co/apm/go.sum b/vendor/go.elastic.co/apm/go.sum new file mode 100644 index 00000000000..6f89a3c9558 --- /dev/null +++ b/vendor/go.elastic.co/apm/go.sum @@ -0,0 +1,53 @@ +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/cucumber/godog v0.8.1 h1:lVb+X41I4YDreE+ibZ50bdXmySxgRviYFgKY6Aw4XE8= +github.com/cucumber/godog v0.8.1/go.mod h1:vSh3r/lM+psC1BPXvdkSEuNjmXfpVqrMGYAElF6hxnA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/elastic/go-sysinfo v1.1.1 h1:ZVlaLDyhVkDfjwPGU55CQRCRolNpc7P0BbyhhQZQmMI= +github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= +github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= 
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/santhosh-tekuri/jsonschema v1.2.4 h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis= +github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +go.elastic.co/fastjson v1.0.0 h1:ooXV/ABvf+tBul26jcVViPT3sBir0PvXgibYB1IQQzg= +go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= +howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= diff --git a/vendor/go.elastic.co/apm/gocontext.go b/vendor/go.elastic.co/apm/gocontext.go new file mode 100644 index 00000000000..d238c065669 --- /dev/null +++ b/vendor/go.elastic.co/apm/gocontext.go @@ -0,0 +1,138 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +import ( + "context" + + "go.elastic.co/apm/internal/apmcontext" +) + +// ContextWithSpan returns a copy of parent in which the given span +// is stored, associated with the key ContextSpanKey. +func ContextWithSpan(parent context.Context, s *Span) context.Context { + return apmcontext.ContextWithSpan(parent, s) +} + +// ContextWithTransaction returns a copy of parent in which the given +// transaction is stored, associated with the key ContextTransactionKey. +func ContextWithTransaction(parent context.Context, t *Transaction) context.Context { + return apmcontext.ContextWithTransaction(parent, t) +} + +// SpanFromContext returns the current Span in context, if any. The span must +// have been added to the context previously using ContextWithSpan, or the +// top-level StartSpan function. +func SpanFromContext(ctx context.Context) *Span { + value, _ := apmcontext.SpanFromContext(ctx).(*Span) + return value +} + +// TransactionFromContext returns the current Transaction in context, if any. +// The transaction must have been added to the context previously using +// ContextWithTransaction. +func TransactionFromContext(ctx context.Context) *Transaction { + value, _ := apmcontext.TransactionFromContext(ctx).(*Transaction) + return value +} + +// DetachedContext returns a new context detached from the lifetime +// of ctx, but which still returns the values of ctx. 
+// +// DetachedContext can be used to maintain the trace context required +// to correlate events, but where the operation is "fire-and-forget", +// and should not be affected by the deadline or cancellation of ctx. +func DetachedContext(ctx context.Context) context.Context { + return &detachedContext{Context: context.Background(), orig: ctx} +} + +type detachedContext struct { + context.Context + orig context.Context +} + +// Value returns c.orig.Value(key). +func (c *detachedContext) Value(key interface{}) interface{} { + return c.orig.Value(key) +} + +// StartSpan is equivalent to calling StartSpanOptions with a zero SpanOptions struct. +func StartSpan(ctx context.Context, name, spanType string) (*Span, context.Context) { + return StartSpanOptions(ctx, name, spanType, SpanOptions{}) +} + +// StartSpanOptions starts and returns a new Span within the sampled transaction +// and parent span in the context, if any. If the span isn't dropped, it will be +// stored in the resulting context. +// +// If opts.Parent is non-zero, its value will be used in preference to any parent +// span in ctx. +// +// StartSpanOptions always returns a non-nil Span. Its End method must be called +// when the span completes. +func StartSpanOptions(ctx context.Context, name, spanType string, opts SpanOptions) (*Span, context.Context) { + var span *Span + if opts.parent = SpanFromContext(ctx); opts.parent != nil { + if opts.parent.tx == nil && opts.parent.tracer != nil { + span = opts.parent.tracer.StartSpan(name, spanType, opts.parent.transactionID, opts) + } else { + span = opts.parent.tx.StartSpanOptions(name, spanType, opts) + } + } else { + tx := TransactionFromContext(ctx) + span = tx.StartSpanOptions(name, spanType, opts) + } + if !span.Dropped() { + ctx = ContextWithSpan(ctx, span) + } + return span, ctx +} + +// CaptureError returns a new Error related to the sampled transaction +// and span present in the context, if any, and sets its exception info +// from err. 
The Error.Handled field will be set to true, and a stacktrace +// set either from err, or from the caller. +// +// If the provided error is nil, then CaptureError will also return nil; +// otherwise a non-nil Error will always be returned. If there is no +// transaction or span in the context, then the returned Error's Send +// method will have no effect. +func CaptureError(ctx context.Context, err error) *Error { + if err == nil { + return nil + } + if span := SpanFromContext(ctx); span != nil { + if span.tracer == nil { + return &Error{cause: err, err: err.Error()} + } + e := span.tracer.NewError(err) + e.Handled = true + e.SetSpan(span) + return e + } else if tx := TransactionFromContext(ctx); tx != nil { + if tx.tracer == nil { + return &Error{cause: err, err: err.Error()} + } + e := tx.tracer.NewError(err) + e.Handled = true + e.SetTransaction(tx) + return e + } else { + return &Error{cause: err, err: err.Error()} + } +} diff --git a/vendor/go.elastic.co/apm/gofuzz.go b/vendor/go.elastic.co/apm/gofuzz.go new file mode 100644 index 00000000000..1fbbcaf3384 --- /dev/null +++ b/vendor/go.elastic.co/apm/gofuzz.go @@ -0,0 +1,270 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// +build gofuzz + +package apm + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/santhosh-tekuri/jsonschema" + + "go.elastic.co/apm/internal/apmschema" + "go.elastic.co/apm/model" + "go.elastic.co/apm/stacktrace" + "go.elastic.co/fastjson" +) + +func Fuzz(data []byte) int { + type Payload struct { + Service *model.Service `json:"service"` + Process *model.Process `json:"process,omitempty"` + System *model.System `json:"system,omitempty"` + Errors []*model.Error `json:"errors"` + Transactions []*model.Transaction `json:"transactions"` + } + var payload Payload + if err := json.Unmarshal(data, &payload); err != nil { + return 0 + } + + tracer := DefaultTracer + tracer.Transport = &gofuzzTransport{} + tracer.SetCaptureBody(CaptureBodyAll) + + setContext := func(in *model.Context, out *Context) error { + if in == nil { + return nil + } + for k, v := range in.Tags { + out.SetLabel(k, v) + } + if in.Request != nil { + var body io.Reader + var postForm url.Values + if in.Request.Body != nil { + body = strings.NewReader(in.Request.Body.Raw) + if in.Request.Body.Form != nil { + postForm = in.Request.Body.Form + } + } + req, err := http.NewRequest(in.Request.Method, in.Request.URL.Full, body) + if err != nil { + return err + } + capturedBody := tracer.CaptureHTTPRequestBody(req) + if in.Request.Socket != nil { + req.RemoteAddr = in.Request.Socket.RemoteAddress + if in.Request.Socket.Encrypted { + req.TLS = new(tls.ConnectionState) + } + } + req.PostForm = postForm + if in.User != nil && in.User.Username != "" { + req.SetBasicAuth(in.User.Username, "") + } + + var major, minor int + if n, err := fmt.Sscanf(in.Request.HTTPVersion, "%d.%d", &major, &minor); err != nil { + return err + } else if n != 2 { + return errors.Errorf("invalid HTTP version %s", in.Request.HTTPVersion) + } + req.ProtoMajor = major + req.ProtoMinor = minor + + if 
in.Request.Headers != nil { + if in.Request.Headers.UserAgent != "" { + req.Header.Set("User-Agent", in.Request.Headers.UserAgent) + } + if in.Request.Headers.ContentType != "" { + req.Header.Set("Content-Type", in.Request.Headers.ContentType) + } + if in.Request.Headers.Cookie != "" { + for _, v := range strings.Split(in.Request.Headers.Cookie, ";") { + req.Header.Add("Cookie", v) + } + } + } + + out.SetHTTPRequest(req) + out.SetHTTPRequestBody(capturedBody) + } + if in.Response != nil { + out.SetHTTPStatusCode(in.Response.StatusCode) + if in.Response.Finished != nil { + out.SetHTTPResponseFinished(*in.Response.Finished) + } + if in.Response.HeadersSent != nil { + out.SetHTTPResponseHeadersSent(*in.Response.HeadersSent) + } + if in.Response.Headers != nil { + h := make(http.Header) + h.Set("Content-Type", in.Response.Headers.ContentType) + out.SetHTTPResponseHeaders(h) + } + } + return nil + } + + for _, t := range payload.Transactions { + if t == nil { + continue + } + tx := tracer.StartTransaction(t.Name, t.Type) + tx.Result = t.Result + tx.Timestamp = time.Time(t.Timestamp) + if setContext(t.Context, &tx.Context) != nil { + return 0 + } + for _, s := range t.Spans { + span := tx.StartSpan(s.Name, s.Type, nil) + span.Timestamp = tx.Timestamp.Add(time.Duration(s.Start * float64(time.Millisecond))) + if s.Context != nil && s.Context.Database != nil { + span.Context.SetDatabase(DatabaseSpanContext{ + Instance: s.Context.Database.Instance, + Statement: s.Context.Database.Statement, + Type: s.Context.Database.Type, + User: s.Context.Database.User, + }) + } + span.Duration = time.Duration(s.Duration * float64(time.Millisecond)) + span.End() + } + tx.Duration = time.Duration(t.Duration * float64(time.Millisecond)) + tx.End() + } + + for _, e := range payload.Errors { + if e == nil { + continue + } + var err *Error + if e.Log.Message != "" { + err = tracer.NewErrorLog(ErrorLogRecord{ + Message: e.Log.Message, + MessageFormat: e.Log.ParamMessage, + Level: e.Log.Level, + 
LoggerName: e.Log.LoggerName, + }) + } else { + ee := exceptionError{e.Exception} + if e.Exception.Code.String != "" { + err = tracer.NewError(stringCodeException{ee}) + } else { + err = tracer.NewError(float64CodeException{ee}) + } + } + if setContext(e.Context, &err.Context) != nil { + return 0 + } + err.Culprit = e.Culprit + err.Timestamp = time.Time(e.Timestamp) + err.Send() + } + + return 0 +} + +type float64CodeException struct { + exceptionError +} + +func (e float64CodeException) Code() float64 { + return e.x.Code.Number +} + +type stringCodeException struct { + exceptionError +} + +func (e stringCodeException) Code() string { + return e.x.Code.String +} + +type exceptionError struct { + x model.Exception +} + +func (e exceptionError) Type() string { + return e.x.Type +} + +func (e exceptionError) Error() string { + return e.x.Message +} + +func (e exceptionError) StackTrace() []stacktrace.Frame { + if len(e.x.Stacktrace) == 0 { + return nil + } + frames := make([]stacktrace.Frame, len(e.x.Stacktrace)) + for i, f := range e.x.Stacktrace { + frames[i].Function = f.Function + frames[i].File = f.File + frames[i].Line = f.Line + } + return frames +} + +type gofuzzTransport struct { + writer fastjson.Writer +} + +func (t *gofuzzTransport) SendErrors(ctx context.Context, payload *model.ErrorsPayload) error { + t.writer.Reset() + if err := payload.MarshalFastJSON(&t.writer); err != nil { + return err + } + t.validate(apmschema.Errors) + return nil +} + +func (t *gofuzzTransport) SendMetrics(ctx context.Context, payload *model.MetricsPayload) error { + t.writer.Reset() + if err := payload.MarshalFastJSON(&t.writer); err != nil { + return err + } + t.validate(apmschema.Metrics) + return nil +} + +func (t *gofuzzTransport) SendTransactions(ctx context.Context, payload *model.TransactionsPayload) error { + t.writer.Reset() + if err := payload.MarshalFastJSON(&t.writer); err != nil { + return err + } + t.validate(apmschema.Transactions) + return nil +} + +func (t 
*gofuzzTransport) validate(schema *jsonschema.Schema) { + if err := schema.Validate(bytes.NewReader(t.writer.Bytes())); err != nil { + panic(err) + } +} diff --git a/vendor/go.elastic.co/apm/internal/apmcontext/context.go b/vendor/go.elastic.co/apm/internal/apmcontext/context.go new file mode 100644 index 00000000000..e6ad7101937 --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/apmcontext/context.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmcontext + +import "context" + +var ( + // ContextWithSpan takes a context and span and returns a new context + // from which the span can be extracted using SpanFromContext. + // + // ContextWithSpan is used by apm.ContextWithSpan. It is a + // variable to allow other packages, such as apmot, to replace it + // at package init time. + ContextWithSpan = DefaultContextWithSpan + + // ContextWithTransaction takes a context and transaction and returns + // a new context from which the transaction can be extracted using + // TransactionFromContext. + // + // ContextWithTransaction is used by apm.ContextWithTransaction. + // It is a variable to allow other packages, such as apmot, to replace + // it at package init time. 
+ ContextWithTransaction = DefaultContextWithTransaction + + // SpanFromContext returns a span included in the context using + // ContextWithSpan. + // + // SpanFromContext is used by apm.SpanFromContext. It is a + // variable to allow other packages, such as apmot, to replace it + // at package init time. + SpanFromContext = DefaultSpanFromContext + + // TransactionFromContext returns a transaction included in the context + // using ContextWithTransaction. + // + // TransactionFromContext is used by apm.TransactionFromContext. + // It is a variable to allow other packages, such as apmot, to replace + // it at package init time. + TransactionFromContext = DefaultTransactionFromContext +) + +type spanKey struct{} +type transactionKey struct{} + +// DefaultContextWithSpan is the default value for ContextWithSpan. +func DefaultContextWithSpan(ctx context.Context, span interface{}) context.Context { + return context.WithValue(ctx, spanKey{}, span) +} + +// DefaultContextWithTransaction is the default value for ContextWithTransaction. +func DefaultContextWithTransaction(ctx context.Context, tx interface{}) context.Context { + return context.WithValue(ctx, transactionKey{}, tx) +} + +// DefaultSpanFromContext is the default value for SpanFromContext. +func DefaultSpanFromContext(ctx context.Context) interface{} { + return ctx.Value(spanKey{}) +} + +// DefaultTransactionFromContext is the default value for TransactionFromContext. +func DefaultTransactionFromContext(ctx context.Context) interface{} { + return ctx.Value(transactionKey{}) +} diff --git a/vendor/go.elastic.co/apm/internal/apmhostutil/container.go b/vendor/go.elastic.co/apm/internal/apmhostutil/container.go new file mode 100644 index 00000000000..ff734852b2d --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/apmhostutil/container.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmhostutil + +import "go.elastic.co/apm/model" + +// Container returns information about the container running the process, or an +// error the container information could not be determined. +func Container() (*model.Container, error) { + return containerInfo() +} + +// Kubernetes returns information about the Kubernetes node and pod running +// the process, or an error if they could not be determined. This information +// does not include the KUBERNETES_* environment variables that can be set via +// the Downward API. +func Kubernetes() (*model.Kubernetes, error) { + return kubernetesInfo() +} diff --git a/vendor/go.elastic.co/apm/internal/apmhostutil/container_linux.go b/vendor/go.elastic.co/apm/internal/apmhostutil/container_linux.go new file mode 100644 index 00000000000..4ce16440549 --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/apmhostutil/container_linux.go @@ -0,0 +1,156 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build linux + +package apmhostutil + +import ( + "bufio" + "errors" + "io" + "os" + "path" + "regexp" + "strings" + "sync" + + "go.elastic.co/apm/model" +) + +const ( + systemdScopeSuffix = ".scope" +) + +var ( + cgroupContainerInfoOnce sync.Once + cgroupContainerInfoError error + kubernetes *model.Kubernetes + container *model.Container + + kubepodsRegexp = regexp.MustCompile( + "" + + `(?:^/kubepods/[^/]+/pod([^/]+)/$)|` + + `(?:^/kubepods\.slice/kubepods-[^/]+\.slice/kubepods-[^/]+-pod([^/]+)\.slice/$)`, + ) + + containerIDRegexp = regexp.MustCompile( + "^" + + "[[:xdigit:]]{64}|" + + "[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4,}" + + "$", + ) +) + +func containerInfo() (*model.Container, error) { + container, _, err := cgroupContainerInfo() + return container, err +} + +func kubernetesInfo() (*model.Kubernetes, error) { + _, kubernetes, err := cgroupContainerInfo() + if err == nil && kubernetes == nil { + return nil, errors.New("could not determine kubernetes info") + } + return kubernetes, err +} + +func cgroupContainerInfo() (*model.Container, *model.Kubernetes, error) { + cgroupContainerInfoOnce.Do(func() { + cgroupContainerInfoError = func() error { + f, err := os.Open("/proc/self/cgroup") + if err != nil { + return err + } + defer f.Close() + + c, k, err := readCgroupContainerInfo(f) + if err != nil { + return err + } + if c == nil { + return 
errors.New("could not determine container info") + } + container = c + kubernetes = k + return nil + }() + }) + return container, kubernetes, cgroupContainerInfoError +} + +func readCgroupContainerInfo(r io.Reader) (*model.Container, *model.Kubernetes, error) { + var container *model.Container + var kubernetes *model.Kubernetes + s := bufio.NewScanner(r) + for s.Scan() { + fields := strings.SplitN(s.Text(), ":", 3) + if len(fields) != 3 { + continue + } + cgroupPath := fields[2] + + // Depending on the filesystem driver used for cgroup + // management, the paths in /proc/pid/cgroup will have + // one of the following formats in a Docker container: + // + // systemd: /system.slice/docker-.scope + // cgroupfs: /docker/ + // + // In a Kubernetes pod, the cgroup path will look like: + // + // systemd: /kubepods.slice/kubepods-.slice/kubepods--pod.slice/.scope + // cgroupfs: /kubepods//pod/ + // + dir, id := path.Split(cgroupPath) + if strings.HasSuffix(id, systemdScopeSuffix) { + id = id[:len(id)-len(systemdScopeSuffix)] + if dash := strings.IndexRune(id, '-'); dash != -1 { + id = id[dash+1:] + } + } + if match := kubepodsRegexp.FindStringSubmatch(dir); match != nil { + // By default, Kubernetes will set the hostname of + // the pod containers to the pod name. Users that + // override the name should use the Downard API to + // override the pod name. + hostname, _ := os.Hostname() + uid := match[1] + if uid == "" { + // Systemd cgroup driver is being used, + // so we need to unescape '_' back to '-'. + uid = strings.Replace(match[2], "_", "-", -1) + } + kubernetes = &model.Kubernetes{ + Pod: &model.KubernetesPod{ + Name: hostname, + UID: uid, + }, + } + // We don't check the contents of the last path segment + // when we've matched "^/kubepods"; we assume that it is + // a valid container ID. 
+ container = &model.Container{ID: id} + } else if containerIDRegexp.MatchString(id) { + container = &model.Container{ID: id} + } + } + if err := s.Err(); err != nil { + return nil, nil, err + } + return container, kubernetes, nil +} diff --git a/vendor/go.elastic.co/apm/internal/apmhostutil/container_nonlinux.go b/vendor/go.elastic.co/apm/internal/apmhostutil/container_nonlinux.go new file mode 100644 index 00000000000..a02a44292fb --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/apmhostutil/container_nonlinux.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// +build !linux + +package apmhostutil + +import ( + "runtime" + + "github.com/pkg/errors" + + "go.elastic.co/apm/model" +) + +func containerInfo() (*model.Container, error) { + return nil, errors.Errorf("container ID computation not implemented for %s", runtime.GOOS) +} + +func kubernetesInfo() (*model.Kubernetes, error) { + return nil, errors.Errorf("kubernetes info gathering not implemented for %s", runtime.GOOS) +} diff --git a/vendor/go.elastic.co/apm/internal/apmhttputil/forwarded.go b/vendor/go.elastic.co/apm/internal/apmhttputil/forwarded.go new file mode 100644 index 00000000000..9001178150d --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/apmhttputil/forwarded.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmhttputil + +import ( + "strconv" + "strings" +) + +// ForwardedHeader holds information extracted from a "Forwarded" HTTP header. +type ForwardedHeader struct { + For string + Host string + Proto string +} + +// ParseForwarded parses a "Forwarded" HTTP header. +func ParseForwarded(f string) ForwardedHeader { + // We only consider the first value in the sequence, + // if there are multiple. Disregard everything after + // the first comma. 
+ if comma := strings.IndexRune(f, ','); comma != -1 { + f = f[:comma] + } + var result ForwardedHeader + for f != "" { + field := f + if semi := strings.IndexRune(f, ';'); semi != -1 { + field = f[:semi] + f = f[semi+1:] + } else { + f = "" + } + eq := strings.IndexRune(field, '=') + if eq == -1 { + // Malformed field, ignore. + continue + } + key := strings.TrimSpace(field[:eq]) + value := strings.TrimSpace(field[eq+1:]) + if len(value) > 0 && value[0] == '"' { + var err error + value, err = strconv.Unquote(value) + if err != nil { + // Malformed, ignore + continue + } + } + switch strings.ToLower(key) { + case "for": + result.For = value + case "host": + result.Host = value + case "proto": + result.Proto = value + } + } + return result +} diff --git a/vendor/go.elastic.co/apm/internal/apmhttputil/remoteaddr.go b/vendor/go.elastic.co/apm/internal/apmhttputil/remoteaddr.go new file mode 100644 index 00000000000..e79400e6a8a --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/apmhttputil/remoteaddr.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmhttputil + +import ( + "net/http" + "strconv" +) + +// RemoteAddr returns the remote (peer) socket address for req, +// a server HTTP request. 
+func RemoteAddr(req *http.Request) string { + remoteAddr, _ := splitHost(req.RemoteAddr) + return remoteAddr +} + +// DestinationAddr returns the destination server address and port +// for req, a client HTTP request. +// +// If req.URL.Host contains a port it will be returned, and otherwise +// the default port according to req.URL.Scheme will be returned. If +// the included port is not a valid integer, or no port is included +// and the scheme is unknown, the returned port value will be zero. +func DestinationAddr(req *http.Request) (string, int) { + host, strport := splitHost(req.URL.Host) + var port int + if strport != "" { + port, _ = strconv.Atoi(strport) + } else { + port = SchemeDefaultPort(req.URL.Scheme) + } + return host, port +} + +// SchemeDefaultPort returns the default port for the given URI scheme, +// if known, or 0 otherwise. +func SchemeDefaultPort(scheme string) int { + switch scheme { + case "http": + return 80 + case "https": + return 443 + } + return 0 +} diff --git a/vendor/go.elastic.co/apm/internal/apmhttputil/url.go b/vendor/go.elastic.co/apm/internal/apmhttputil/url.go new file mode 100644 index 00000000000..45ea94181ad --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/apmhttputil/url.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmhttputil + +import ( + "net" + "net/http" + "strings" + + "go.elastic.co/apm/internal/apmstrings" + "go.elastic.co/apm/model" +) + +// RequestURL returns a model.URL for req. +// +// If req contains an absolute URI, the values will be split and +// sanitized, but no further processing performed. For all other +// requests (i.e. most server-side requests), we reconstruct the +// URL based on various proxy forwarding headers and other request +// attributes. +func RequestURL(req *http.Request) model.URL { + out := model.URL{ + Path: truncateString(req.URL.Path), + Search: truncateString(req.URL.RawQuery), + Hash: truncateString(req.URL.Fragment), + } + if req.URL.Host != "" { + // Absolute URI: client-side or proxy request, so ignore the + // headers. + hostname, port := splitHost(req.URL.Host) + out.Hostname = truncateString(hostname) + out.Port = truncateString(port) + out.Protocol = truncateString(req.URL.Scheme) + return out + } + + // This is a server-side request URI, which contains only the path. + // We synthesize the full URL by extracting the host and protocol + // from headers, or inferring from other properties. + var fullHost string + forwarded := ParseForwarded(req.Header.Get("Forwarded")) + if forwarded.Host != "" { + fullHost = forwarded.Host + out.Protocol = truncateString(forwarded.Proto) + } else if xfh := req.Header.Get("X-Forwarded-Host"); xfh != "" { + fullHost = xfh + } else { + fullHost = req.Host + } + hostname, port := splitHost(fullHost) + out.Hostname = truncateString(hostname) + out.Port = truncateString(port) + + // Protocol might be extracted from the Forwarded header. If it's not, + // look for various other headers. 
+ if out.Protocol == "" { + if proto := req.Header.Get("X-Forwarded-Proto"); proto != "" { + out.Protocol = truncateString(proto) + } else if proto := req.Header.Get("X-Forwarded-Protocol"); proto != "" { + out.Protocol = truncateString(proto) + } else if proto := req.Header.Get("X-Url-Scheme"); proto != "" { + out.Protocol = truncateString(proto) + } else if req.Header.Get("Front-End-Https") == "on" { + out.Protocol = "https" + } else if req.Header.Get("X-Forwarded-Ssl") == "on" { + out.Protocol = "https" + } else if req.TLS != nil { + out.Protocol = "https" + } else { + // Assume http otherwise. + out.Protocol = "http" + } + } + return out +} + +func splitHost(in string) (host, port string) { + if strings.LastIndexByte(in, ':') == -1 { + // In the common (relative to other "errors") case that + // there is no colon, we can avoid allocations by not + // calling SplitHostPort. + return in, "" + } + host, port, err := net.SplitHostPort(in) + if err != nil { + if n := len(in); n > 1 && in[0] == '[' && in[n-1] == ']' { + in = in[1 : n-1] + } + return in, "" + } + return host, port +} + +func truncateString(s string) string { + // At the time of writing, all length limits are 1024. + s, _ = apmstrings.Truncate(s, 1024) + return s +} diff --git a/vendor/go.elastic.co/apm/internal/apmlog/logger.go b/vendor/go.elastic.co/apm/internal/apmlog/logger.go new file mode 100644 index 00000000000..d5cb1689338 --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/apmlog/logger.go @@ -0,0 +1,173 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmlog + +import ( + "fmt" + "io" + "log" + "os" + "strings" + "sync" + "time" + + "go.elastic.co/fastjson" +) + +var ( + // DefaultLogger is the default Logger to use, if ELASTIC_APM_LOG_* are specified. + DefaultLogger Logger + + fastjsonPool = &sync.Pool{ + New: func() interface{} { + return &fastjson.Writer{} + }, + } +) + +func init() { + initDefaultLogger() +} + +func initDefaultLogger() { + fileStr := strings.TrimSpace(os.Getenv("ELASTIC_APM_LOG_FILE")) + if fileStr == "" { + return + } + + var logWriter io.Writer + switch strings.ToLower(fileStr) { + case "stdout": + logWriter = os.Stdout + case "stderr": + logWriter = os.Stderr + default: + f, err := os.Create(fileStr) + if err != nil { + log.Printf("failed to create %q: %s (disabling logging)", fileStr, err) + return + } + logWriter = &syncFile{File: f} + } + + logLevel := errorLevel + if levelStr := strings.TrimSpace(os.Getenv("ELASTIC_APM_LOG_LEVEL")); levelStr != "" { + level, err := parseLogLevel(levelStr) + if err != nil { + log.Printf("invalid ELASTIC_APM_LOG_LEVEL %q, falling back to %q", levelStr, logLevel) + } else { + logLevel = level + } + } + DefaultLogger = levelLogger{w: logWriter, level: logLevel} +} + +const ( + debugLevel logLevel = iota + infoLevel + warnLevel + errorLevel + noLevel +) + +type logLevel uint8 + +func (l logLevel) String() string { + switch l { + case debugLevel: + return "debug" + case infoLevel: + return "info" + case warnLevel: + return "warn" + case errorLevel: + return "error" + } + return "" +} + +func parseLogLevel(s string) 
(logLevel, error) { + switch strings.ToLower(s) { + case "debug": + return debugLevel, nil + case "info": + return infoLevel, nil + case "warn": + return warnLevel, nil + case "error": + return errorLevel, nil + } + return noLevel, fmt.Errorf("invalid log level string %q", s) +} + +// Logger provides methods for logging. +type Logger interface { + Debugf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) +} + +type levelLogger struct { + w io.Writer + level logLevel +} + +// Debugf logs a message with log.Printf, with a DEBUG prefix. +func (l levelLogger) Debugf(format string, args ...interface{}) { + l.logf(debugLevel, format, args...) +} + +// Errorf logs a message with log.Printf, with an ERROR prefix. +func (l levelLogger) Errorf(format string, args ...interface{}) { + l.logf(errorLevel, format, args...) +} + +// Warningf logs a message with log.Printf, with a WARNING prefix. +func (l levelLogger) Warningf(format string, args ...interface{}) { + l.logf(warnLevel, format, args...) +} + +func (l levelLogger) logf(level logLevel, format string, args ...interface{}) { + if level < l.level { + return + } + jw := fastjsonPool.Get().(*fastjson.Writer) + jw.RawString(`{"level":"`) + jw.RawString(level.String()) + jw.RawString(`","time":"`) + jw.Time(time.Now(), time.RFC3339) + jw.RawString(`","message":`) + jw.String(fmt.Sprintf(format, args...)) + jw.RawString("}\n") + l.w.Write(jw.Bytes()) + jw.Reset() + fastjsonPool.Put(jw) +} + +type syncFile struct { + mu sync.Mutex + *os.File +} + +// Write calls f.File.Write with f.mu held, to protect multiple Tracers +// in the same process from one another. 
+func (f *syncFile) Write(data []byte) (int, error) { + f.mu.Lock() + defer f.mu.Unlock() + return f.File.Write(data) +} diff --git a/vendor/go.elastic.co/apm/internal/apmschema/schema.go b/vendor/go.elastic.co/apm/internal/apmschema/schema.go new file mode 100644 index 00000000000..412c8498847 --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/apmschema/schema.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmschema + +import ( + "log" + "path" + "path/filepath" + "runtime" + + "github.com/santhosh-tekuri/jsonschema" +) + +var ( + // Error is the compiled JSON Schema for an error. + Error *jsonschema.Schema + + // Metadata is the compiled JSON Schema for metadata. + Metadata *jsonschema.Schema + + // MetricSet is the compiled JSON Schema for a set of metrics. + MetricSet *jsonschema.Schema + + // Span is the compiled JSON Schema for a span. + Span *jsonschema.Schema + + // Transaction is the compiled JSON Schema for a transaction. 
+ Transaction *jsonschema.Schema +) + +func init() { + _, filename, _, ok := runtime.Caller(0) + if !ok { + panic("source line info not available") + } + compiler := jsonschema.NewCompiler() + compiler.Draft = jsonschema.Draft4 + schemaDir := path.Join(filepath.ToSlash(filepath.Dir(filename)), "jsonschema") + if runtime.GOOS == "windows" { + schemaDir = "/" + schemaDir + } + compile := func(filepath string, out **jsonschema.Schema) { + schema, err := compiler.Compile("file://" + path.Join(schemaDir, filepath)) + if err != nil { + log.Fatal(err) + } + *out = schema + } + compile("errors/error.json", &Error) + compile("metadata.json", &Metadata) + compile("metricsets/metricset.json", &MetricSet) + compile("spans/span.json", &Span) + compile("transactions/transaction.json", &Transaction) +} diff --git a/vendor/go.elastic.co/apm/internal/apmschema/update.sh b/vendor/go.elastic.co/apm/internal/apmschema/update.sh new file mode 100644 index 00000000000..c47c86a25b5 --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/apmschema/update.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +set -ex + +BRANCH=master + +FILES=( \ + "errors/error.json" \ + "sourcemaps/payload.json" \ + "spans/span.json" \ + "transactions/mark.json" \ + "transactions/transaction.json" \ + "metricsets/metricset.json" \ + "metricsets/sample.json" \ + "context.json" \ + "message.json" \ + "metadata.json" \ + "process.json" \ + "request.json" \ + "service.json" \ + "span_subtype.json" \ + "span_type.json" \ + "stacktrace_frame.json" \ + "system.json" \ + "tags.json" \ + "timestamp_epoch.json" \ + "transaction_name.json" \ + "transaction_type.json" \ + "user.json" \ +) + +mkdir -p jsonschema/errors jsonschema/transactions jsonschema/sourcemaps jsonschema/spans jsonschema/metricsets + +for i in "${FILES[@]}"; do + o=jsonschema/$i + curl -sf https://raw.githubusercontent.com/elastic/apm-server/${BRANCH}/docs/spec/${i} --compressed -o $o +done diff --git a/vendor/go.elastic.co/apm/internal/apmstrings/truncate.go 
b/vendor/go.elastic.co/apm/internal/apmstrings/truncate.go new file mode 100644 index 00000000000..0ed2b6d78af --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/apmstrings/truncate.go @@ -0,0 +1,31 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmstrings + +// Truncate returns s truncated at n runes, and the number +// of runes in the resulting string (<= n). +func Truncate(s string, n int) (string, int) { + var j int + for i := range s { + if j == n { + return s[:i], n + } + j++ + } + return s, j +} diff --git a/vendor/go.elastic.co/apm/internal/apmversion/version.go b/vendor/go.elastic.co/apm/internal/apmversion/version.go new file mode 100644 index 00000000000..0b3675b9420 --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/apmversion/version.go @@ -0,0 +1,23 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmversion + +const ( + // AgentVersion is the Elastic APM Go Agent version. + AgentVersion = "1.7.2" +) diff --git a/vendor/go.elastic.co/apm/internal/configutil/duration.go b/vendor/go.elastic.co/apm/internal/configutil/duration.go new file mode 100644 index 00000000000..f29a3dbf693 --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/configutil/duration.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package configutil + +import ( + "fmt" + "strconv" + "strings" + "time" + "unicode" +) + +// ParseDuration parses s as a duration, accepting a subset +// of the syntax supported by time.ParseDuration. +// +// Valid time units are "ms", "s", "m". 
+func ParseDuration(s string) (time.Duration, error) { + orig := s + var mul time.Duration = 1 + if strings.HasPrefix(s, "-") { + mul = -1 + s = s[1:] + } + + sep := -1 + for i, c := range s { + if sep == -1 { + if c < '0' || c > '9' { + sep = i + break + } + } + } + if sep == -1 { + return 0, fmt.Errorf("missing unit in duration %s (allowed units: ms, s, m)", orig) + } + + n, err := strconv.ParseInt(s[:sep], 10, 32) + if err != nil { + return 0, fmt.Errorf("invalid duration %s", orig) + } + switch s[sep:] { + case "ms": + mul *= time.Millisecond + case "s": + mul *= time.Second + case "m": + mul *= time.Minute + default: + for _, c := range s[sep:] { + if unicode.IsSpace(c) { + return 0, fmt.Errorf("invalid character %q in duration %s", c, orig) + } + } + return 0, fmt.Errorf("invalid unit in duration %s (allowed units: ms, s, m)", orig) + } + return mul * time.Duration(n), nil +} diff --git a/vendor/go.elastic.co/apm/internal/configutil/env.go b/vendor/go.elastic.co/apm/internal/configutil/env.go new file mode 100644 index 00000000000..04ac3cb97d4 --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/configutil/env.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package configutil + +import ( + "os" + "strconv" + "time" + + "github.com/pkg/errors" + + "go.elastic.co/apm/internal/wildcard" +) + +// ParseDurationEnv gets the value of the environment variable envKey +// and, if set, parses it as a duration. If the environment variable +// is unset, defaultDuration is returned. +func ParseDurationEnv(envKey string, defaultDuration time.Duration) (time.Duration, error) { + value := os.Getenv(envKey) + if value == "" { + return defaultDuration, nil + } + d, err := ParseDuration(value) + if err != nil { + return 0, errors.Wrapf(err, "failed to parse %s", envKey) + } + return d, nil +} + +// ParseSizeEnv gets the value of the environment variable envKey +// and, if set, parses it as a size. If the environment variable +// is unset, defaultSize is returned. +func ParseSizeEnv(envKey string, defaultSize Size) (Size, error) { + value := os.Getenv(envKey) + if value == "" { + return defaultSize, nil + } + s, err := ParseSize(value) + if err != nil { + return 0, errors.Wrapf(err, "failed to parse %s", envKey) + } + return s, nil +} + +// ParseBoolEnv gets the value of the environment variable envKey +// and, if set, parses it as a boolean. If the environment variable +// is unset, defaultValue is returned. +func ParseBoolEnv(envKey string, defaultValue bool) (bool, error) { + value := os.Getenv(envKey) + if value == "" { + return defaultValue, nil + } + b, err := strconv.ParseBool(value) + if err != nil { + return false, errors.Wrapf(err, "failed to parse %s", envKey) + } + return b, nil +} + +// ParseListEnv gets the value of the environment variable envKey +// and, if set, parses it as a list separated by sep. If the environment +// variable is unset, defaultValue is returned. 
+func ParseListEnv(envKey, sep string, defaultValue []string) []string { + value := os.Getenv(envKey) + if value == "" { + return defaultValue + } + return ParseList(value, sep) +} + +// ParseWildcardPatternsEnv gets the value of the environment variable envKey +// and, if set, parses it as a list of wildcard patterns. If the environment +// variable is unset, defaultValue is returned. +func ParseWildcardPatternsEnv(envKey string, defaultValue wildcard.Matchers) wildcard.Matchers { + value := os.Getenv(envKey) + if value == "" { + return defaultValue + } + return ParseWildcardPatterns(value) +} diff --git a/vendor/go.elastic.co/apm/internal/configutil/list.go b/vendor/go.elastic.co/apm/internal/configutil/list.go new file mode 100644 index 00000000000..ceed90199d4 --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/configutil/list.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package configutil + +import "strings" + +// ParseList parses s as a list of strings, separated by sep, +// and with whitespace trimmed from the list items, omitting +// empty items. 
// ParseList parses s as a list of strings, separated by sep,
// and with whitespace trimmed from the list items, omitting
// empty items.
func ParseList(s, sep string) []string {
	var list []string
	for _, item := range strings.Split(s, sep) {
		item = strings.TrimSpace(item)
		if item != "" {
			list = append(list, item)
		}
	}
	return list
}

// Size represents a size in bytes.
type Size int64

// Common power-of-two sizes.
const (
	Byte  Size = 1
	KByte Size = 1024
	MByte Size = 1024 * 1024
	GByte Size = 1024 * 1024 * 1024
)

// Bytes returns s as a number of bytes.
func (s Size) Bytes() int64 {
	return int64(s)
}

// String returns s in its most compact string representation.
func (s Size) String() string {
	if s == 0 {
		return "0B"
	}
	switch {
	case s%GByte == 0:
		return fmt.Sprintf("%dGB", s/GByte)
	case s%MByte == 0:
		return fmt.Sprintf("%dMB", s/MByte)
	case s%KByte == 0:
		return fmt.Sprintf("%dKB", s/KByte)
	default:
		return fmt.Sprintf("%dB", s)
	}
}

// ParseSize parses s as a size, in bytes.
//
// Valid size units are "b", "kb", "mb", "gb" (case-insensitive).
// A leading "-" negates the size.
func ParseSize(s string) (Size, error) {
	orig := s
	var mul Size = 1
	if strings.HasPrefix(s, "-") {
		mul = -1
		s = s[1:]
	}

	// The magnitude runs up to the first non-digit byte; the
	// remainder is the unit suffix.
	sep := -1
	for i, c := range s {
		if c < '0' || c > '9' {
			sep = i
			break
		}
	}
	if sep == -1 {
		return 0, fmt.Errorf("missing unit in size %s (allowed units: B, KB, MB, GB)", orig)
	}

	n, err := strconv.ParseInt(s[:sep], 10, 32)
	if err != nil {
		return 0, fmt.Errorf("invalid size %s", orig)
	}
	switch strings.ToLower(s[sep:]) {
	case "gb":
		// Multiply (rather than assign) so a leading "-" parsed
		// above is preserved, matching ParseDuration's behaviour.
		mul *= GByte
	case "mb":
		mul *= MByte
	case "kb":
		mul *= KByte
	case "b":
	default:
		// Distinguish "number followed by whitespace" from a plain
		// unknown unit, to give a more precise error.
		for _, c := range s[sep:] {
			if unicode.IsSpace(c) {
				return 0, fmt.Errorf("invalid character %q in size %s", c, orig)
			}
		}
		return 0, fmt.Errorf("invalid unit in size %s (allowed units: B, KB, MB, GB)", orig)
	}
	return mul * Size(n), nil
}
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package configutil + +import ( + "strings" + + "go.elastic.co/apm/internal/wildcard" +) + +// ParseWildcardPatterns parses s as a comma-separated list of wildcard patterns, +// and returns wildcard.Matchers for each. +// +// Patterns support the "*" wildcard, which will match zero or more characters. +// A prefix of (?-i) treats the pattern case-sensitively, while a prefix of (?i) +// treats the pattern case-insensitively (the default). All other characters in +// the pattern are matched exactly. +func ParseWildcardPatterns(s string) wildcard.Matchers { + patterns := ParseList(s, ",") + matchers := make(wildcard.Matchers, len(patterns)) + for i, p := range patterns { + matchers[i] = ParseWildcardPattern(p) + } + return matchers +} + +// ParseWildcardPattern parses p as a wildcard pattern, returning a wildcard.Matcher. +// +// Patterns support the "*" wildcard, which will match zero or more characters. +// A prefix of (?-i) treats the pattern case-sensitively, while a prefix of (?i) +// treats the pattern case-insensitively (the default). All other characters in +// the pattern are matched exactly. 
+func ParseWildcardPattern(p string) *wildcard.Matcher { + const ( + caseSensitivePrefix = "(?-i)" + caseInsensitivePrefix = "(?i)" + ) + caseSensitive := wildcard.CaseInsensitive + switch { + case strings.HasPrefix(p, caseSensitivePrefix): + caseSensitive = wildcard.CaseSensitive + p = p[len(caseSensitivePrefix):] + case strings.HasPrefix(p, caseInsensitivePrefix): + p = p[len(caseInsensitivePrefix):] + } + return wildcard.NewMatcher(p, caseSensitive) +} diff --git a/vendor/go.elastic.co/apm/internal/iochan/doc.go b/vendor/go.elastic.co/apm/internal/iochan/doc.go new file mode 100644 index 00000000000..4898c05bf6e --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/iochan/doc.go @@ -0,0 +1,19 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package iochan provides a channel-based io.Reader. +package iochan diff --git a/vendor/go.elastic.co/apm/internal/iochan/reader.go b/vendor/go.elastic.co/apm/internal/iochan/reader.go new file mode 100644 index 00000000000..0025667306b --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/iochan/reader.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
// Reader is a channel-based io.Reader.
//
// Reader is safe for use in a single producer, single consumer pattern.
type Reader struct {
	// C can be used for receiving read requests.
	//
	// Once a read request is received, it must be responded
	// to, in order to avoid blocking the reader.
	C    <-chan ReadRequest
	c    chan ReadRequest  // send side of C; closed by CloseWrite
	resp chan readResponse // responses to Read; closed by CloseRead

	mu          sync.Mutex // guards the fields below
	readClosed  bool       // set once CloseRead has run
	writeClosed bool       // set once CloseWrite has run
	readErr     error      // error Read returns after CloseRead
}

// NewReader returns a new Reader.
//
// resp is buffered (capacity 1) so that a consumer calling
// ReadRequest.Respond does not block waiting for Read to receive
// the response.
func NewReader() *Reader {
	c := make(chan ReadRequest)
	return &Reader{
		C:    c,
		c:    c,
		resp: make(chan readResponse, 1),
	}
}

// CloseWrite closes reader.C. CloseWrite is idempotent,
// but must not be called concurrently with Read.
func (r *Reader) CloseWrite() {
	r.mu.Lock()
	defer r.mu.Unlock()
	if !r.writeClosed {
		r.writeClosed = true
		// Sending on a closed channel panics, which is why Read
		// must not be running concurrently with this close.
		close(r.c)
	}
}
// CloseRead closes the reader such that any waiting or future
// Reads return err. Additional calls to CloseRead have no
// effect. CloseRead must not be called concurrently with
// ReadRequest.Respond.
//
// CloseRead always returns nil.
func (r *Reader) CloseRead(err error) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if !r.readClosed {
		r.readClosed = true
		r.readErr = err // reported by Read from now on
		close(r.resp)   // wakes any Read blocked on <-r.resp
	}
	return nil
}

// Read sends a ReadRequest to r.C containing buf, and returns the
// response sent by the channel consumer via the read request's
// Respond method.
func (r *Reader) Read(buf []byte) (int, error) {
	// Either deliver the request to the consumer, or observe
	// CloseRead (r.resp closed) and bail out with readErr.
	select {
	case <-r.resp:
		return 0, r.readErr
	case r.c <- ReadRequest{Buf: buf, response: r.resp}:
	}
	// Wait for the consumer's response; !ok means r.resp was
	// closed by CloseRead before a response arrived.
	resp, ok := <-r.resp
	if !ok {
		return 0, r.readErr
	}
	return resp.N, resp.Err
}

// ReadRequest holds the buffer and response channel for a read request.
type ReadRequest struct {
	// Buf is the read buffer into which data should be read.
	Buf []byte

	response chan<- readResponse // where Respond delivers its result
}

// Respond responds to the Read request. Respond must not be called
// concurrently with Reader.Close.
func (rr *ReadRequest) Respond(n int, err error) {
	rr.response <- readResponse{N: n, Err: err}
}

// readResponse carries a consumer's reply to a single Read.
type readResponse struct {
	N   int   // number of bytes written into ReadRequest.Buf
	Err error // error, if any, for Read to return
}
var (
	uintptrType      = reflect.TypeOf(uintptr(0))
	runtimeFrameType = reflect.TypeOf(runtime.Frame{})
	// errorsStackTraceUintptr is true for github.com/pkg/errors
	// 0.8.x and earlier, where errors.Frame is a uintptr PC.
	errorsStackTraceUintptr = uintptrType.ConvertibleTo(reflect.TypeOf(*new(errors.Frame)))
	// errorsStackTraceFrame is true for github.com/pkg/errors
	// 0.9.0 and later, where errors.Frame is convertible to
	// runtime.Frame.
	errorsStackTraceFrame = reflect.TypeOf(*new(errors.Frame)).ConvertibleTo(runtimeFrameType)
)

// AppendStacktrace appends stack frames to out, based on stackTrace.
// A negative limit means no limit.
func AppendStacktrace(stackTrace errors.StackTrace, out *[]stacktrace.Frame, limit int) {
	// github.com/pkg/errors 0.8.x and earlier represent
	// stack frames as uintptr; 0.9.0 and later represent
	// them as runtime.Frames.
	//
	// TODO(axw) drop support for older github.com/pkg/errors
	// versions when we release go.elastic.co/apm v2.0.0.
	if errorsStackTraceUintptr {
		// Reinterpret each Frame as its underlying PC and let the
		// runtime resolve the frames (limit applied there).
		pc := make([]uintptr, len(stackTrace))
		for i, frame := range stackTrace {
			pc[i] = *(*uintptr)(unsafe.Pointer(&frame))
		}
		*out = stacktrace.AppendCallerFrames(*out, pc, limit)
	} else if errorsStackTraceFrame {
		if limit >= 0 && len(stackTrace) > limit {
			stackTrace = stackTrace[:limit]
		}
		for _, frame := range stackTrace {
			// NOTE(review): this reinterprets errors.Frame as
			// runtime.Frame via unsafe; the ConvertibleTo check
			// above is assumed to imply identical layout — confirm.
			rf := (*runtime.Frame)(unsafe.Pointer(&frame))
			*out = append(*out, stacktrace.RuntimeFrame(*rf))
		}
	}
}

// BlockHeaderSize is the size of the block header, in bytes:
// one tag byte followed by a little-endian uint32 data length.
const BlockHeaderSize = 5

// BlockTag is a block tag, which can be used for classification.
type BlockTag uint8

// BlockHeader holds a fixed-size block header.
type BlockHeader struct {
	// Tag is the block's tag.
	Tag BlockTag

	// Size is the size of the block data, in bytes.
	Size uint32
}

// Buffer is a ring buffer of byte blocks.
type Buffer struct {
	buf       []byte                // fixed-capacity backing storage
	headerbuf [BlockHeaderSize]byte // scratch space for encoding/decoding headers
	len       int                   // bytes currently stored, including header bytes
	write     int                   // next write offset into buf
	read      int                   // next read offset into buf

	// Evicted will be called when an old block is evicted to make place for a new one.
	Evicted func(BlockHeader)
}

// New returns a new Buffer with the given size in bytes.
func New(size int) *Buffer {
	return &Buffer{
		buf: make([]byte, size),
		// No-op default so callers may leave Evicted unset.
		Evicted: func(BlockHeader) {},
	}
}

// Len returns the number of bytes currently in the buffer, including
// block-accounting bytes.
func (b *Buffer) Len() int {
	return b.len
}

// Cap returns the capacity of the buffer.
func (b *Buffer) Cap() int {
	return len(b.buf)
}

// WriteBlockTo writes the oldest block in b to w, returning the block header and the number of bytes written to w.
// It returns io.EOF if the buffer is empty.
func (b *Buffer) WriteBlockTo(w io.Writer) (header BlockHeader, written int64, err error) {
	if b.len == 0 {
		return header, 0, io.EOF
	}
	// Read the header, which may wrap around the end of the ring.
	if n := copy(b.headerbuf[:], b.buf[b.read:]); n < len(b.headerbuf) {
		b.read = copy(b.headerbuf[n:], b.buf[:])
	} else {
		b.read = (b.read + n) % b.Cap()
	}
	b.len -= len(b.headerbuf)
	header.Tag = BlockTag(b.headerbuf[0])
	header.Size = binary.LittleEndian.Uint32(b.headerbuf[1:])
	size := int(header.Size)

	if b.read+size > b.Cap() {
		// The block data wraps; write the tail segment first.
		tail := b.buf[b.read:]
		n, err := w.Write(tail)
		if err != nil {
			// Discard the rest of the block so the buffer stays
			// consistent despite the failed write.
			// NOTE(review): the header size was already subtracted
			// from b.len above; subtracting it again here looks like
			// a double-count — confirm.
			b.read = (b.read + size) % b.Cap()
			b.len -= size + len(b.headerbuf)
			return header, int64(n), err
		}
		size -= n
		written = int64(n)
		b.read = 0
		b.len -= n
	}
	n, err := w.Write(b.buf[b.read : b.read+size])
	if err != nil {
		return header, written + int64(n), err
	}
	written += int64(n)
	b.read = (b.read + size) % b.Cap()
	b.len -= size
	return header, written, nil
}
// WriteBlock writes p as a block to b, with tag t.
//
// If len(p)+BlockHeaderSize > b.Cap(), bytes.ErrTooLarge will be returned.
// If the buffer does not currently have room for the block, then the
// oldest blocks will be evicted until enough room is available.
func (b *Buffer) WriteBlock(p []byte, tag BlockTag) (int, error) {
	lenp := len(p)
	if lenp+BlockHeaderSize > b.Cap() {
		return 0, bytes.ErrTooLarge
	}
	// Evict oldest blocks (notifying b.Evicted for each) until the
	// new block fits.
	for lenp+BlockHeaderSize > b.Cap()-b.Len() {
		header, _, err := b.WriteBlockTo(ioutil.Discard)
		if err != nil {
			return 0, err
		}
		b.Evicted(header)
	}
	// Encode the header, then copy it into the ring, wrapping to the
	// start of buf if it straddles the end.
	b.headerbuf[0] = uint8(tag)
	binary.LittleEndian.PutUint32(b.headerbuf[1:], uint32(lenp))
	if n := copy(b.buf[b.write:], b.headerbuf[:]); n < len(b.headerbuf) {
		b.write = copy(b.buf, b.headerbuf[n:])
	} else {
		b.write = (b.write + n) % b.Cap()
	}
	// Copy the payload, likewise wrapping if needed.
	if n := copy(b.buf[b.write:], p); n < lenp {
		b.write = copy(b.buf, p[n:])
	} else {
		b.write = (b.write + n) % b.Cap()
	}
	b.len += lenp + BlockHeaderSize
	return lenp, nil
}
+ +// Package ringbuffer provides a ring buffer for storing blocks of bytes. +// Bytes are written and read in discrete blocks. If the buffer becomes +// full, then writing to it will evict the oldest blocks until there is +// space for a new one. +package ringbuffer diff --git a/vendor/go.elastic.co/apm/internal/wildcard/doc.go b/vendor/go.elastic.co/apm/internal/wildcard/doc.go new file mode 100644 index 00000000000..07645ad995b --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/wildcard/doc.go @@ -0,0 +1,19 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package wildcard provides a fast, zero-allocation wildcard matcher. +package wildcard diff --git a/vendor/go.elastic.co/apm/internal/wildcard/matcher.go b/vendor/go.elastic.co/apm/internal/wildcard/matcher.go new file mode 100644 index 00000000000..e406eb6eb8d --- /dev/null +++ b/vendor/go.elastic.co/apm/internal/wildcard/matcher.go @@ -0,0 +1,142 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
// CaseSensitivity controls the case sensitivity of matching.
type CaseSensitivity bool

// CaseSensitivity values.
const (
	CaseSensitive   CaseSensitivity = true
	CaseInsensitive CaseSensitivity = false
)

// Matcher matches strings against a wildcard pattern with configurable case sensitivity.
type Matcher struct {
	parts         []string
	wildcardBegin bool
	wildcardEnd   bool
	caseSensitive CaseSensitivity
}

// NewMatcher constructs a new wildcard matcher for the given pattern.
//
// If p is the empty string, it will match only the empty string.
// If p is not a valid UTF-8 string, matching behaviour is undefined.
func NewMatcher(p string, caseSensitive CaseSensitivity) *Matcher {
	m := &Matcher{
		wildcardBegin: strings.HasPrefix(p, "*"),
		wildcardEnd:   strings.HasSuffix(p, "*"),
		caseSensitive: caseSensitive,
	}
	// Split on "*", keeping only the non-empty literal segments;
	// lower-case them up front for case-insensitive matching.
	for _, segment := range strings.Split(p, "*") {
		if segment == "" {
			continue
		}
		if caseSensitive == CaseInsensitive {
			segment = strings.ToLower(segment)
		}
		m.parts = append(m.parts, segment)
	}
	return m
}
// Match reports whether s matches m's wildcard pattern.
func (m *Matcher) Match(s string) bool {
	// An empty pattern matches only the empty string.
	if len(m.parts) == 0 && !m.wildcardBegin && !m.wildcardEnd {
		return s == ""
	}
	// Fast path: a single literal with no wildcards reduces to a
	// plain (possibly case-insensitive) string comparison.
	if len(m.parts) == 1 && !m.wildcardBegin && !m.wildcardEnd {
		if m.caseSensitive {
			return s == m.parts[0]
		}
		return len(s) == len(m.parts[0]) && hasPrefixLower(s, m.parts[0]) == 0
	}
	parts := m.parts
	if !m.wildcardEnd && len(parts) > 0 {
		// No trailing '*', so the final part must be anchored at
		// the end of s; check it now and drop it from the scan.
		part := parts[len(parts)-1]
		if m.caseSensitive {
			if !strings.HasSuffix(s, part) {
				return false
			}
		} else {
			if len(s) < len(part) {
				return false
			}
			if hasPrefixLower(s[len(s)-len(part):], part) != 0 {
				return false
			}
		}
		parts = parts[:len(parts)-1]
	}
	// Match the remaining parts left to right, consuming s as we go.
	for i, part := range parts {
		j := -1 // byte offset of part within s, or -1 if not found
		if m.caseSensitive {
			if i > 0 || m.wildcardBegin {
				j = strings.Index(s, part)
			} else {
				// First part with no leading '*': anchored at the
				// start of s.
				if !strings.HasPrefix(s, part) {
					return false
				}
				j = 0
			}
		} else {
			// Case-insensitive scan: advance one rune at a time
			// until part matches at the current offset.
			off := 0
			for j == -1 && len(s)-off >= len(part) {
				skip := hasPrefixLower(s[off:], part)
				if skip == 0 {
					j = off
				} else {
					if i == 0 && !m.wildcardBegin {
						// Anchored first part failed to match.
						return false
					}
					off += skip
				}
			}
		}
		if j == -1 {
			return false
		}
		s = s[j+len(part):]
	}
	return true
}

// hasPrefixLower reports whether or not s begins with prefixLower,
// returning 0 if it does, and the number of bytes representing the
// first rune in s otherwise.
//
// prefixLower must already be lower-cased; each rune of s may match
// either the lower-case rune or its unicode.ToUpper form.
//
// NOTE(review): s is decoded at prefixLower's byte offsets, which
// assumes matching upper/lower-case forms occupy the same number of
// bytes — confirm this holds for the patterns in use.
func hasPrefixLower(s, prefixLower string) (skip int) {
	var firstSize int
	for i, r := range prefixLower {
		r2, size := utf8.DecodeRuneInString(s[i:])
		if firstSize == 0 {
			firstSize = size
		}
		if r2 != r && r2 != unicode.ToUpper(r) {
			return firstSize
		}
	}
	return 0
}
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package wildcard + +// Matchers is a slice of Matcher, matching any of the contained matchers. +type Matchers []*Matcher + +// MatchAny returns true iff any of the matchers returns true. +func (m Matchers) MatchAny(s string) bool { + for _, m := range m { + if m.Match(s) { + return true + } + } + return false +} diff --git a/vendor/go.elastic.co/apm/logger.go b/vendor/go.elastic.co/apm/logger.go new file mode 100644 index 00000000000..8e30e5918a8 --- /dev/null +++ b/vendor/go.elastic.co/apm/logger.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
// Logger is an interface for logging, used by the tracer
// to log tracer errors and other interesting events.
type Logger interface {
	// Debugf logs a message at debug level.
	Debugf(format string, args ...interface{})

	// Errorf logs a message at error level.
	Errorf(format string, args ...interface{})
}

// WarningLogger extends Logger with a Warningf method.
//
// TODO(axw) this will be removed in v2.0.0, and the
// Warningf method will be added directly to Logger.
type WarningLogger interface {
	Logger

	// Warningf logs a message at warning level.
	Warningf(format string, args ...interface{})
}

// makeWarningLogger adapts l to WarningLogger: a logger that already
// implements Warningf is returned unchanged, while any other logger
// has its warnings demoted to debug level.
func makeWarningLogger(l Logger) WarningLogger {
	wl, ok := l.(WarningLogger)
	if !ok {
		return debugWarningLogger{Logger: l}
	}
	return wl
}

// debugWarningLogger wraps a plain Logger, routing warnings to Debugf.
type debugWarningLogger struct {
	Logger
}

// Warningf logs the message at debug level, since the wrapped Logger
// has no warning level of its own.
func (l debugWarningLogger) Warningf(format string, args ...interface{}) {
	l.Debugf(format, args...)
}
See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +import ( + "context" + "sort" + "strings" + "sync" + + "go.elastic.co/apm/internal/wildcard" + "go.elastic.co/apm/model" +) + +// Metrics holds a set of metrics. +type Metrics struct { + disabled wildcard.Matchers + + mu sync.Mutex + metrics []*model.Metrics + + // transactionGroupMetrics holds metrics which are scoped to transaction + // groups, and are not sorted according to their labels. + transactionGroupMetrics []*model.Metrics +} + +func (m *Metrics) reset() { + m.metrics = m.metrics[:0] + m.transactionGroupMetrics = m.transactionGroupMetrics[:0] +} + +// MetricLabel is a name/value pair for labeling metrics. +type MetricLabel struct { + // Name is the label name. + Name string + + // Value is the label value. + Value string +} + +// MetricsGatherer provides an interface for gathering metrics. +type MetricsGatherer interface { + // GatherMetrics gathers metrics and adds them to m. + // + // If ctx.Done() is signaled, gathering should be aborted and + // ctx.Err() returned. If GatherMetrics returns an error, it + // will be logged, but otherwise there is no effect; the + // implementation must take care not to leave m in an invalid + // state due to errors. + GatherMetrics(ctx context.Context, m *Metrics) error +} + +// GatherMetricsFunc is a function type implementing MetricsGatherer. +type GatherMetricsFunc func(context.Context, *Metrics) error + +// GatherMetrics calls f(ctx, m). +func (f GatherMetricsFunc) GatherMetrics(ctx context.Context, m *Metrics) error { + return f(ctx, m) +} + +// Add adds a metric with the given name, labels, and value, +// The labels are expected to be sorted lexicographically. 
+func (m *Metrics) Add(name string, labels []MetricLabel, value float64) { + m.addMetric(name, labels, model.Metric{Value: value}) +} + +func (m *Metrics) addMetric(name string, labels []MetricLabel, metric model.Metric) { + if m.disabled.MatchAny(name) { + return + } + m.mu.Lock() + defer m.mu.Unlock() + + var metrics *model.Metrics + results := make([]int, len(m.metrics)) + i := sort.Search(len(m.metrics), func(j int) bool { + results[j] = compareLabels(m.metrics[j].Labels, labels) + return results[j] >= 0 + }) + if i < len(results) && results[i] == 0 { + // labels are equal + metrics = m.metrics[i] + } else { + var modelLabels model.StringMap + if len(labels) > 0 { + modelLabels = make(model.StringMap, len(labels)) + for i, l := range labels { + modelLabels[i] = model.StringMapItem{ + Key: l.Name, Value: l.Value, + } + } + } + metrics = &model.Metrics{ + Labels: modelLabels, + Samples: make(map[string]model.Metric), + } + if i == len(results) { + m.metrics = append(m.metrics, metrics) + } else { + m.metrics = append(m.metrics, nil) + copy(m.metrics[i+1:], m.metrics[i:]) + m.metrics[i] = metrics + } + } + metrics.Samples[name] = metric +} + +func compareLabels(a model.StringMap, b []MetricLabel) int { + na, nb := len(a), len(b) + n := na + if na > nb { + n = nb + } + for i := 0; i < n; i++ { + la, lb := a[i], b[i] + d := strings.Compare(la.Key, lb.Name) + if d == 0 { + d = strings.Compare(la.Value, lb.Value) + } + if d != 0 { + return d + } + } + switch { + case na < nb: + return -1 + case na > nb: + return 1 + } + return 0 +} + +func gatherMetrics(ctx context.Context, g MetricsGatherer, m *Metrics, logger Logger) { + defer func() { + if r := recover(); r != nil { + if logger != nil { + logger.Debugf("%T.GatherMetrics panicked: %s", g, r) + } + } + }() + if err := g.GatherMetrics(ctx, m); err != nil { + if logger != nil && err != context.Canceled { + logger.Debugf("%T.GatherMetrics failed: %s", g, err) + } + } +} diff --git a/vendor/go.elastic.co/apm/model/doc.go 
b/vendor/go.elastic.co/apm/model/doc.go new file mode 100644 index 00000000000..3fe20d31a65 --- /dev/null +++ b/vendor/go.elastic.co/apm/model/doc.go @@ -0,0 +1,21 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package model provides the Elastic APM model types. +// +// https://www.elastic.co/guide/en/apm/server/current/intake-api.html +package model diff --git a/vendor/go.elastic.co/apm/model/generate.sh b/vendor/go.elastic.co/apm/model/generate.sh new file mode 100644 index 00000000000..f403a8dd5a9 --- /dev/null +++ b/vendor/go.elastic.co/apm/model/generate.sh @@ -0,0 +1,4 @@ +#!/bin/sh +set -e +go run go.elastic.co/fastjson/cmd/generate-fastjson -f -o marshal_fastjson.go . +exec go-licenser marshal_fastjson.go diff --git a/vendor/go.elastic.co/apm/model/gofuzz.go b/vendor/go.elastic.co/apm/model/gofuzz.go new file mode 100644 index 00000000000..04fb279e252 --- /dev/null +++ b/vendor/go.elastic.co/apm/model/gofuzz.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build gofuzz + +package model + +import ( + "bytes" + "encoding/json" + + "go.elastic.co/apm/internal/apmschema" + "go.elastic.co/fastjson" +) + +func Fuzz(data []byte) int { + type Payload struct { + Service *Service `json:"service"` + Process *Process `json:"process,omitempty"` + System *System `json:"system,omitempty"` + Errors []*Error `json:"errors"` + Transactions []Transaction `json:"transactions"` + } + + var payload Payload + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + if err := decoder.Decode(&payload); err != nil { + return -1 + } + raw := make(map[string]interface{}) + if err := json.Unmarshal(data, &raw); err != nil { + return -1 + } + + if len(payload.Errors) != 0 { + payload := ErrorsPayload{ + Service: payload.Service, + Process: payload.Process, + System: payload.System, + Errors: payload.Errors, + } + var w fastjson.Writer + if err := payload.MarshalFastJSON(&w); err != nil { + panic(err) + } + if err := apmschema.Errors.Validate(bytes.NewReader(w.Bytes())); err != nil { + panic(err) + } + } + + if len(payload.Transactions) != 0 { + payload := TransactionsPayload{ + Service: payload.Service, + Process: payload.Process, + System: payload.System, + Transactions: payload.Transactions, + } + var w fastjson.Writer + if err := payload.MarshalFastJSON(&w); err != nil { + panic(err) + } + if err := 
apmschema.Transactions.Validate(bytes.NewReader(w.Bytes())); err != nil { + panic(err) + } + } + return 0 +} diff --git a/vendor/go.elastic.co/apm/model/maps.go b/vendor/go.elastic.co/apm/model/maps.go new file mode 100644 index 00000000000..7313d9cf7b0 --- /dev/null +++ b/vendor/go.elastic.co/apm/model/maps.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +// StringMap is a slice-representation of map[string]string, +// optimized for fast JSON encoding. +// +// Slice items are expected to be ordered by key. +type StringMap []StringMapItem + +// StringMapItem holds a string key and value. +type StringMapItem struct { + // Key is the map item's key. + Key string + + // Value is the map item's value. + Value string +} + +// IfaceMap is a slice-representation of map[string]interface{}, +// optimized for fast JSON encoding. +// +// Slice items are expected to be ordered by key. +type IfaceMap []IfaceMapItem + +// IfaceMapItem holds a string key and value. +type IfaceMapItem struct { + // Key is the map item's key. + Key string + + // Value is the map item's value. 
+ Value interface{} +} diff --git a/vendor/go.elastic.co/apm/model/marshal.go b/vendor/go.elastic.co/apm/model/marshal.go new file mode 100644 index 00000000000..298d4bcdb68 --- /dev/null +++ b/vendor/go.elastic.co/apm/model/marshal.go @@ -0,0 +1,639 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import ( + "encoding/hex" + "encoding/json" + "net/http" + "net/url" + "sort" + "strings" + "time" + + "github.com/pkg/errors" + + "go.elastic.co/fastjson" +) + +//go:generate sh generate.sh + +// MarshalFastJSON writes the JSON representation of t to w. +func (t Time) MarshalFastJSON(w *fastjson.Writer) error { + w.Int64(time.Time(t).UnixNano() / int64(time.Microsecond)) + return nil +} + +// UnmarshalJSON unmarshals the JSON data into t. +func (t *Time) UnmarshalJSON(data []byte) error { + var usec int64 + if err := json.Unmarshal(data, &usec); err != nil { + return err + } + *t = Time(time.Unix(usec/1000000, (usec%1000000)*1000).UTC()) + return nil +} + +// UnmarshalJSON unmarshals the JSON data into v. 
+func (v *HTTPSpanContext) UnmarshalJSON(data []byte) error { + var httpSpanContext struct { + URL string + StatusCode int `json:"status_code"` + } + if err := json.Unmarshal(data, &httpSpanContext); err != nil { + return err + } + u, err := url.Parse(httpSpanContext.URL) + if err != nil { + return err + } + v.URL = u + v.StatusCode = httpSpanContext.StatusCode + return nil +} + +// MarshalFastJSON writes the JSON representation of v to w. +func (v *HTTPSpanContext) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + beforeURL := w.Size() + w.RawString(`"url":"`) + if v.marshalURL(w) { + w.RawByte('"') + } else { + w.Rewind(beforeURL) + } + if v.StatusCode > 0 { + w.RawString(`,"status_code":`) + w.Int64(int64(v.StatusCode)) + } + w.RawByte('}') + return nil +} + +func (v *HTTPSpanContext) marshalURL(w *fastjson.Writer) bool { + if v.URL.Scheme != "" { + if !marshalScheme(w, v.URL.Scheme) { + return false + } + w.RawString("://") + } else { + w.RawString("http://") + } + w.StringContents(v.URL.Host) + if v.URL.Path == "" { + w.RawByte('/') + } else { + if v.URL.Path[0] != '/' { + w.RawByte('/') + } + w.StringContents(v.URL.Path) + } + if v.URL.RawQuery != "" { + w.RawByte('?') + w.StringContents(v.URL.RawQuery) + } + if v.URL.Fragment != "" { + w.RawByte('#') + w.StringContents(v.URL.Fragment) + } + return true +} + +// MarshalFastJSON writes the JSON representation of v to w. 
+func (v *URL) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + first := true + if v.Hash != "" { + const prefix = ",\"hash\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Hash) + } + if v.Hostname != "" { + const prefix = ",\"hostname\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Hostname) + } + if v.Path != "" { + const prefix = `,"pathname":"` + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if v.Path[0] != '/' { + w.RawByte('/') + } + w.StringContents(v.Path) + w.RawByte('"') + } + if v.Port != "" { + const prefix = ",\"port\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Port) + } + schemeBegin := -1 + schemeEnd := -1 + if v.Protocol != "" { + before := w.Size() + const prefix = ",\"protocol\":\"" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + schemeBegin = w.Size() + if marshalScheme(w, v.Protocol) { + schemeEnd = w.Size() + w.RawByte('"') + } else { + w.Rewind(before) + } + } + if v.Search != "" { + const prefix = ",\"search\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Search) + } + if schemeEnd != -1 && v.Hostname != "" && v.Path != "" { + before := w.Size() + w.RawString(",\"full\":") + if !v.marshalFullURL(w, w.Bytes()[schemeBegin:schemeEnd]) { + w.Rewind(before) + } + } + w.RawByte('}') + return nil +} + +func marshalScheme(w *fastjson.Writer, scheme string) bool { + // Canonicalize the scheme to lowercase. Don't use + // strings.ToLower, as it's too general and requires + // additional memory allocations. + // + // The scheme should start with a letter, and may + // then be followed by letters, digits, '+', '-', + // and '.'. 
We don't validate the scheme here, we + // just use those restrictions as a basis for + // optimization; anything not in that set will + // mean the full URL is omitted. + for i := 0; i < len(scheme); i++ { + c := scheme[i] + switch { + case c >= 'a' && c <= 'z' || c >= '0' && c <= '9' || c == '+' || c == '-' || c == '.': + w.RawByte(c) + case c >= 'A' && c <= 'Z': + w.RawByte(c + 'a' - 'A') + default: + return false + } + } + return true +} + +func (v *URL) marshalFullURL(w *fastjson.Writer, scheme []byte) bool { + w.RawByte('"') + before := w.Size() + w.RawBytes(scheme) + w.RawString("://") + if strings.IndexByte(v.Hostname, ':') == -1 { + w.StringContents(v.Hostname) + } else { + w.RawByte('[') + w.StringContents(v.Hostname) + w.RawByte(']') + } + if v.Port != "" { + w.RawByte(':') + w.StringContents(v.Port) + } + if !strings.HasPrefix(v.Path, "/") { + w.RawByte('/') + } + w.StringContents(v.Path) + if v.Search != "" { + w.RawByte('?') + w.StringContents(v.Search) + } + if v.Hash != "" { + w.RawByte('#') + w.StringContents(v.Hash) + } + if n := w.Size() - before; n > 1024 { + // Truncate the full URL to 1024 bytes. + w.Rewind(w.Size() - n + 1024) + } + w.RawByte('"') + return true +} + +func (l *Log) isZero() bool { + return l.Message == "" +} + +func (e *Exception) isZero() bool { + return e.Message == "" +} + +func (c Cookies) isZero() bool { + return len(c) == 0 +} + +// MarshalFastJSON writes the JSON representation of c to w. +func (c Cookies) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + first := true +outer: + for i := len(c) - 1; i >= 0; i-- { + for j := i + 1; j < len(c); j++ { + if c[i].Name == c[j].Name { + continue outer + } + } + if first { + first = false + } else { + w.RawByte(',') + } + w.String(c[i].Name) + w.RawByte(':') + w.String(c[i].Value) + } + w.RawByte('}') + return nil +} + +// UnmarshalJSON unmarshals the JSON data into c. 
+func (c *Cookies) UnmarshalJSON(data []byte) error { + m := make(map[string]string) + if err := json.Unmarshal(data, &m); err != nil { + return err + } + *c = make([]*http.Cookie, 0, len(m)) + for k, v := range m { + *c = append(*c, &http.Cookie{ + Name: k, + Value: v, + }) + } + sort.Slice(*c, func(i, j int) bool { + return (*c)[i].Name < (*c)[j].Name + }) + return nil +} + +func (hs Headers) isZero() bool { + return len(hs) == 0 +} + +// MarshalFastJSON writes the JSON representation of h to w. +func (hs Headers) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + for i, h := range hs { + if i != 0 { + w.RawByte(',') + } + w.String(h.Key) + w.RawByte(':') + if len(h.Values) == 1 { + // Just one item, add the item directly. + w.String(h.Values[0]) + } else { + // Zero or multiple items, include them all. + w.RawByte('[') + for i, v := range h.Values { + if i != 0 { + w.RawByte(',') + } + w.String(v) + } + w.RawByte(']') + } + } + w.RawByte('}') + return nil +} + +// MarshalFastJSON writes the JSON representation of h to w. +func (*Header) MarshalFastJSON(w *fastjson.Writer) error { + panic("unreachable") +} + +// UnmarshalJSON unmarshals the JSON data into c. +func (hs *Headers) UnmarshalJSON(data []byte) error { + var m map[string]interface{} + if err := json.Unmarshal(data, &m); err != nil { + return err + } + for k, v := range m { + switch v := v.(type) { + case string: + *hs = append(*hs, Header{Key: k, Values: []string{v}}) + case []interface{}: + var values []string + for _, v := range v { + switch v := v.(type) { + case string: + values = append(values, v) + default: + return errors.Errorf("expected string, got %T", v) + } + } + *hs = append(*hs, Header{Key: k, Values: values}) + default: + return errors.Errorf("expected string or []string, got %T", v) + } + } + sort.Slice(*hs, func(i, j int) bool { + return (*hs)[i].Key < (*hs)[j].Key + }) + return nil +} + +// MarshalFastJSON writes the JSON representation of c to w. 
+func (c *ExceptionCode) MarshalFastJSON(w *fastjson.Writer) error { + if c.String != "" { + w.String(c.String) + } else { + w.Float64(c.Number) + } + return nil +} + +// UnmarshalJSON unmarshals the JSON data into c. +func (c *ExceptionCode) UnmarshalJSON(data []byte) error { + var v interface{} + if err := json.Unmarshal(data, &v); err != nil { + return err + } + switch v := v.(type) { + case string: + c.String = v + case float64: + c.Number = v + default: + return errors.Errorf("expected string or number, got %T", v) + } + return nil +} + +// isZero is used by fastjson to implement omitempty. +func (c *ExceptionCode) isZero() bool { + return c.String == "" && c.Number == 0 +} + +// MarshalFastJSON writes the JSON representation of b to w. +func (b *RequestBody) MarshalFastJSON(w *fastjson.Writer) error { + if b.Form != nil { + w.RawByte('{') + first := true + for k, v := range b.Form { + if first { + first = false + } else { + w.RawByte(',') + } + w.String(k) + w.RawByte(':') + if len(v) == 1 { + // Just one item, add the item directly. + w.String(v[0]) + } else { + // Zero or multiple items, include them all. + w.RawByte('[') + first := true + for _, v := range v { + if first { + first = false + } else { + w.RawByte(',') + } + w.String(v) + } + w.RawByte(']') + } + } + w.RawByte('}') + } else { + w.String(b.Raw) + } + return nil +} + +// UnmarshalJSON unmarshals the JSON data into b. 
+func (b *RequestBody) UnmarshalJSON(data []byte) error { + var v interface{} + if err := json.Unmarshal(data, &v); err != nil { + return err + } + switch v := v.(type) { + case string: + b.Raw = v + return nil + case map[string]interface{}: + form := make(url.Values, len(v)) + for k, v := range v { + switch v := v.(type) { + case string: + form.Set(k, v) + case []interface{}: + for _, v := range v { + switch v := v.(type) { + case string: + form.Add(k, v) + default: + return errors.Errorf("expected string, got %T", v) + } + } + default: + return errors.Errorf("expected string or []string, got %T", v) + } + } + b.Form = form + default: + return errors.Errorf("expected string or map, got %T", v) + } + return nil +} + +func (m StringMap) isZero() bool { + return len(m) == 0 +} + +// MarshalFastJSON writes the JSON representation of m to w. +func (m StringMap) MarshalFastJSON(w *fastjson.Writer) (firstErr error) { + w.RawByte('{') + first := true + for _, item := range m { + if first { + first = false + } else { + w.RawByte(',') + } + w.String(item.Key) + w.RawByte(':') + if err := fastjson.Marshal(w, item.Value); err != nil && firstErr == nil { + firstErr = err + } + } + w.RawByte('}') + return nil +} + +// UnmarshalJSON unmarshals the JSON data into m. +func (m *StringMap) UnmarshalJSON(data []byte) error { + var mm map[string]string + if err := json.Unmarshal(data, &mm); err != nil { + return err + } + *m = make(StringMap, 0, len(mm)) + for k, v := range mm { + *m = append(*m, StringMapItem{Key: k, Value: v}) + } + sort.Slice(*m, func(i, j int) bool { + return (*m)[i].Key < (*m)[j].Key + }) + return nil +} + +// MarshalFastJSON exists to prevent code generation for StringMapItem. +func (*StringMapItem) MarshalFastJSON(*fastjson.Writer) error { + panic("unreachable") +} + +func (m IfaceMap) isZero() bool { + return len(m) == 0 +} + +// MarshalFastJSON writes the JSON representation of m to w. 
+func (m IfaceMap) MarshalFastJSON(w *fastjson.Writer) (firstErr error) { + w.RawByte('{') + first := true + for _, item := range m { + if first { + first = false + } else { + w.RawByte(',') + } + w.String(item.Key) + w.RawByte(':') + if err := fastjson.Marshal(w, item.Value); err != nil && firstErr == nil { + firstErr = err + } + } + w.RawByte('}') + return nil +} + +// UnmarshalJSON unmarshals the JSON data into m. +func (m *IfaceMap) UnmarshalJSON(data []byte) error { + var mm map[string]interface{} + if err := json.Unmarshal(data, &mm); err != nil { + return err + } + *m = make(IfaceMap, 0, len(mm)) + for k, v := range mm { + *m = append(*m, IfaceMapItem{Key: k, Value: v}) + } + sort.Slice(*m, func(i, j int) bool { + return (*m)[i].Key < (*m)[j].Key + }) + return nil +} + +// MarshalFastJSON exists to prevent code generation for IfaceMapItem. +func (*IfaceMapItem) MarshalFastJSON(*fastjson.Writer) error { + panic("unreachable") +} + +func (id *TraceID) isZero() bool { + return *id == TraceID{} +} + +// MarshalFastJSON writes the JSON representation of id to w. +func (id *TraceID) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('"') + writeHex(w, id[:]) + w.RawByte('"') + return nil +} + +// UnmarshalJSON unmarshals the JSON data into id. +func (id *TraceID) UnmarshalJSON(data []byte) error { + _, err := hex.Decode(id[:], data[1:len(data)-1]) + return err +} + +func (id *SpanID) isZero() bool { + return *id == SpanID{} +} + +// UnmarshalJSON unmarshals the JSON data into id. +func (id *SpanID) UnmarshalJSON(data []byte) error { + _, err := hex.Decode(id[:], data[1:len(data)-1]) + return err +} + +// MarshalFastJSON writes the JSON representation of id to w. 
+func (id *SpanID) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('"') + writeHex(w, id[:]) + w.RawByte('"') + return nil +} + +func (t *ErrorTransaction) isZero() bool { + return *t == ErrorTransaction{} +} + +func (t *MetricsTransaction) isZero() bool { + return *t == MetricsTransaction{} +} + +func (s *MetricsSpan) isZero() bool { + return *s == MetricsSpan{} +} + +func writeHex(w *fastjson.Writer, v []byte) { + const hextable = "0123456789abcdef" + for _, v := range v { + w.RawByte(hextable[v>>4]) + w.RawByte(hextable[v&0x0f]) + } +} diff --git a/vendor/go.elastic.co/apm/model/marshal_fastjson.go b/vendor/go.elastic.co/apm/model/marshal_fastjson.go new file mode 100644 index 00000000000..cd5749c2533 --- /dev/null +++ b/vendor/go.elastic.co/apm/model/marshal_fastjson.go @@ -0,0 +1,1297 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated by "generate-fastjson". DO NOT EDIT. 
+ +package model + +import ( + "go.elastic.co/fastjson" +) + +func (v *Service) MarshalFastJSON(w *fastjson.Writer) error { + var firstErr error + w.RawByte('{') + first := true + if v.Agent != nil { + const prefix = ",\"agent\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.Agent.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.Environment != "" { + const prefix = ",\"environment\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Environment) + } + if v.Framework != nil { + const prefix = ",\"framework\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.Framework.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.Language != nil { + const prefix = ",\"language\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.Language.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.Name != "" { + const prefix = ",\"name\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Name) + } + if v.Node != nil { + const prefix = ",\"node\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.Node.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.Runtime != nil { + const prefix = ",\"runtime\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.Runtime.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.Version != "" { + const prefix = ",\"version\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Version) + } + w.RawByte('}') + return firstErr +} + +func (v *Agent) 
MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + w.RawString("\"name\":") + w.String(v.Name) + w.RawString(",\"version\":") + w.String(v.Version) + w.RawByte('}') + return nil +} + +func (v *Framework) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + w.RawString("\"name\":") + w.String(v.Name) + w.RawString(",\"version\":") + w.String(v.Version) + w.RawByte('}') + return nil +} + +func (v *Language) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + w.RawString("\"name\":") + w.String(v.Name) + if v.Version != "" { + w.RawString(",\"version\":") + w.String(v.Version) + } + w.RawByte('}') + return nil +} + +func (v *Runtime) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + w.RawString("\"name\":") + w.String(v.Name) + w.RawString(",\"version\":") + w.String(v.Version) + w.RawByte('}') + return nil +} + +func (v *ServiceNode) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + if v.ConfiguredName != "" { + w.RawString("\"configured_name\":") + w.String(v.ConfiguredName) + } + w.RawByte('}') + return nil +} + +func (v *System) MarshalFastJSON(w *fastjson.Writer) error { + var firstErr error + w.RawByte('{') + first := true + if v.Architecture != "" { + const prefix = ",\"architecture\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Architecture) + } + if v.Container != nil { + const prefix = ",\"container\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.Container.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.Hostname != "" { + const prefix = ",\"hostname\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Hostname) + } + if v.Kubernetes != nil { + const prefix = ",\"kubernetes\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := 
v.Kubernetes.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.Platform != "" { + const prefix = ",\"platform\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Platform) + } + w.RawByte('}') + return firstErr +} + +func (v *Process) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + w.RawString("\"pid\":") + w.Int64(int64(v.Pid)) + if v.Argv != nil { + w.RawString(",\"argv\":") + w.RawByte('[') + for i, v := range v.Argv { + if i != 0 { + w.RawByte(',') + } + w.String(v) + } + w.RawByte(']') + } + if v.Ppid != nil { + w.RawString(",\"ppid\":") + w.Int64(int64(*v.Ppid)) + } + if v.Title != "" { + w.RawString(",\"title\":") + w.String(v.Title) + } + w.RawByte('}') + return nil +} + +func (v *Container) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + w.RawString("\"id\":") + w.String(v.ID) + w.RawByte('}') + return nil +} + +func (v *Kubernetes) MarshalFastJSON(w *fastjson.Writer) error { + var firstErr error + w.RawByte('{') + first := true + if v.Namespace != "" { + const prefix = ",\"namespace\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Namespace) + } + if v.Node != nil { + const prefix = ",\"node\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.Node.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.Pod != nil { + const prefix = ",\"pod\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.Pod.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + w.RawByte('}') + return firstErr +} + +func (v *KubernetesNode) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + if v.Name != "" { + w.RawString("\"name\":") + w.String(v.Name) + } + w.RawByte('}') + return nil +} + +func (v *KubernetesPod) 
MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + first := true + if v.Name != "" { + const prefix = ",\"name\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Name) + } + if v.UID != "" { + const prefix = ",\"uid\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.UID) + } + w.RawByte('}') + return nil +} + +func (v *Transaction) MarshalFastJSON(w *fastjson.Writer) error { + var firstErr error + w.RawByte('{') + w.RawString("\"duration\":") + w.Float64(v.Duration) + w.RawString(",\"id\":") + if err := v.ID.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + w.RawString(",\"name\":") + w.String(v.Name) + w.RawString(",\"span_count\":") + if err := v.SpanCount.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + w.RawString(",\"timestamp\":") + if err := v.Timestamp.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + w.RawString(",\"trace_id\":") + if err := v.TraceID.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + w.RawString(",\"type\":") + w.String(v.Type) + if v.Context != nil { + w.RawString(",\"context\":") + if err := v.Context.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if !v.ParentID.isZero() { + w.RawString(",\"parent_id\":") + if err := v.ParentID.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.Result != "" { + w.RawString(",\"result\":") + w.String(v.Result) + } + if v.Sampled != nil { + w.RawString(",\"sampled\":") + w.Bool(*v.Sampled) + } + w.RawByte('}') + return firstErr +} + +func (v *SpanCount) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + w.RawString("\"dropped\":") + w.Int64(int64(v.Dropped)) + w.RawString(",\"started\":") + w.Int64(int64(v.Started)) + w.RawByte('}') + return nil +} + +func (v *Span) MarshalFastJSON(w *fastjson.Writer) 
error { + var firstErr error + w.RawByte('{') + w.RawString("\"duration\":") + w.Float64(v.Duration) + w.RawString(",\"id\":") + if err := v.ID.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + w.RawString(",\"name\":") + w.String(v.Name) + w.RawString(",\"timestamp\":") + if err := v.Timestamp.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + w.RawString(",\"trace_id\":") + if err := v.TraceID.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + w.RawString(",\"transaction_id\":") + if err := v.TransactionID.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + w.RawString(",\"type\":") + w.String(v.Type) + if v.Action != "" { + w.RawString(",\"action\":") + w.String(v.Action) + } + if v.Context != nil { + w.RawString(",\"context\":") + if err := v.Context.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if !v.ParentID.isZero() { + w.RawString(",\"parent_id\":") + if err := v.ParentID.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.Stacktrace != nil { + w.RawString(",\"stacktrace\":") + w.RawByte('[') + for i, v := range v.Stacktrace { + if i != 0 { + w.RawByte(',') + } + if err := v.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + w.RawByte(']') + } + if v.Subtype != "" { + w.RawString(",\"subtype\":") + w.String(v.Subtype) + } + w.RawByte('}') + return firstErr +} + +func (v *SpanContext) MarshalFastJSON(w *fastjson.Writer) error { + var firstErr error + w.RawByte('{') + first := true + if v.Database != nil { + const prefix = ",\"db\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.Database.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.Destination != nil { + const prefix = ",\"destination\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + 
} + if err := v.Destination.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.HTTP != nil { + const prefix = ",\"http\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.HTTP.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if !v.Tags.isZero() { + const prefix = ",\"tags\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.Tags.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + w.RawByte('}') + return firstErr +} + +func (v *DestinationSpanContext) MarshalFastJSON(w *fastjson.Writer) error { + var firstErr error + w.RawByte('{') + first := true + if v.Address != "" { + const prefix = ",\"address\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Address) + } + if v.Port != 0 { + const prefix = ",\"port\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.Int64(int64(v.Port)) + } + if v.Service != nil { + const prefix = ",\"service\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.Service.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + w.RawByte('}') + return firstErr +} + +func (v *DestinationServiceSpanContext) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + first := true + if v.Name != "" { + const prefix = ",\"name\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Name) + } + if v.Resource != "" { + const prefix = ",\"resource\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Resource) + } + if v.Type != "" { + const prefix = ",\"type\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + 
w.String(v.Type) + } + w.RawByte('}') + return nil +} + +func (v *DatabaseSpanContext) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + first := true + if v.Instance != "" { + const prefix = ",\"instance\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Instance) + } + if v.RowsAffected != nil { + const prefix = ",\"rows_affected\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.Int64(*v.RowsAffected) + } + if v.Statement != "" { + const prefix = ",\"statement\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Statement) + } + if v.Type != "" { + const prefix = ",\"type\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Type) + } + if v.User != "" { + const prefix = ",\"user\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.User) + } + w.RawByte('}') + return nil +} + +func (v *Context) MarshalFastJSON(w *fastjson.Writer) error { + var firstErr error + w.RawByte('{') + first := true + if !v.Custom.isZero() { + const prefix = ",\"custom\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.Custom.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.Request != nil { + const prefix = ",\"request\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.Request.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.Response != nil { + const prefix = ",\"response\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.Response.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.Service != nil { + const prefix = ",\"service\":" + 
if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.Service.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if !v.Tags.isZero() { + const prefix = ",\"tags\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.Tags.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.User != nil { + const prefix = ",\"user\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.User.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + w.RawByte('}') + return firstErr +} + +func (v *User) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + first := true + if v.Email != "" { + const prefix = ",\"email\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Email) + } + if v.ID != "" { + const prefix = ",\"id\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.ID) + } + if v.Username != "" { + const prefix = ",\"username\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Username) + } + w.RawByte('}') + return nil +} + +func (v *Error) MarshalFastJSON(w *fastjson.Writer) error { + var firstErr error + w.RawByte('{') + w.RawString("\"id\":") + if err := v.ID.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + w.RawString(",\"timestamp\":") + if err := v.Timestamp.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + if v.Context != nil { + w.RawString(",\"context\":") + if err := v.Context.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.Culprit != "" { + w.RawString(",\"culprit\":") + w.String(v.Culprit) + } + if !v.Exception.isZero() { + w.RawString(",\"exception\":") + if err := 
v.Exception.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if !v.Log.isZero() { + w.RawString(",\"log\":") + if err := v.Log.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if !v.ParentID.isZero() { + w.RawString(",\"parent_id\":") + if err := v.ParentID.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if !v.TraceID.isZero() { + w.RawString(",\"trace_id\":") + if err := v.TraceID.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if !v.Transaction.isZero() { + w.RawString(",\"transaction\":") + if err := v.Transaction.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if !v.TransactionID.isZero() { + w.RawString(",\"transaction_id\":") + if err := v.TransactionID.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + w.RawByte('}') + return firstErr +} + +func (v *ErrorTransaction) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + first := true + if v.Sampled != nil { + const prefix = ",\"sampled\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.Bool(*v.Sampled) + } + if v.Type != "" { + const prefix = ",\"type\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Type) + } + w.RawByte('}') + return nil +} + +func (v *Exception) MarshalFastJSON(w *fastjson.Writer) error { + var firstErr error + w.RawByte('{') + w.RawString("\"handled\":") + w.Bool(v.Handled) + w.RawString(",\"message\":") + w.String(v.Message) + if v.Attributes != nil { + w.RawString(",\"attributes\":") + w.RawByte('{') + { + first := true + for k, v := range v.Attributes { + if first { + first = false + } else { + w.RawByte(',') + } + w.String(k) + w.RawByte(':') + if err := fastjson.Marshal(w, v); err != nil && firstErr == nil { + firstErr = err + } + } + } + w.RawByte('}') + } + if v.Cause != nil { + 
w.RawString(",\"cause\":") + w.RawByte('[') + for i, v := range v.Cause { + if i != 0 { + w.RawByte(',') + } + if err := v.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + w.RawByte(']') + } + if !v.Code.isZero() { + w.RawString(",\"code\":") + if err := v.Code.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.Module != "" { + w.RawString(",\"module\":") + w.String(v.Module) + } + if v.Stacktrace != nil { + w.RawString(",\"stacktrace\":") + w.RawByte('[') + for i, v := range v.Stacktrace { + if i != 0 { + w.RawByte(',') + } + if err := v.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + w.RawByte(']') + } + if v.Type != "" { + w.RawString(",\"type\":") + w.String(v.Type) + } + w.RawByte('}') + return firstErr +} + +func (v *StacktraceFrame) MarshalFastJSON(w *fastjson.Writer) error { + var firstErr error + w.RawByte('{') + w.RawString("\"filename\":") + w.String(v.File) + w.RawString(",\"lineno\":") + w.Int64(int64(v.Line)) + if v.AbsolutePath != "" { + w.RawString(",\"abs_path\":") + w.String(v.AbsolutePath) + } + if v.Column != nil { + w.RawString(",\"colno\":") + w.Int64(int64(*v.Column)) + } + if v.ContextLine != "" { + w.RawString(",\"context_line\":") + w.String(v.ContextLine) + } + if v.Function != "" { + w.RawString(",\"function\":") + w.String(v.Function) + } + if v.LibraryFrame != false { + w.RawString(",\"library_frame\":") + w.Bool(v.LibraryFrame) + } + if v.Module != "" { + w.RawString(",\"module\":") + w.String(v.Module) + } + if v.PostContext != nil { + w.RawString(",\"post_context\":") + w.RawByte('[') + for i, v := range v.PostContext { + if i != 0 { + w.RawByte(',') + } + w.String(v) + } + w.RawByte(']') + } + if v.PreContext != nil { + w.RawString(",\"pre_context\":") + w.RawByte('[') + for i, v := range v.PreContext { + if i != 0 { + w.RawByte(',') + } + w.String(v) + } + w.RawByte(']') + } + if v.Vars != nil { + w.RawString(",\"vars\":") + 
w.RawByte('{') + { + first := true + for k, v := range v.Vars { + if first { + first = false + } else { + w.RawByte(',') + } + w.String(k) + w.RawByte(':') + if err := fastjson.Marshal(w, v); err != nil && firstErr == nil { + firstErr = err + } + } + } + w.RawByte('}') + } + w.RawByte('}') + return firstErr +} + +func (v *Log) MarshalFastJSON(w *fastjson.Writer) error { + var firstErr error + w.RawByte('{') + w.RawString("\"message\":") + w.String(v.Message) + if v.Level != "" { + w.RawString(",\"level\":") + w.String(v.Level) + } + if v.LoggerName != "" { + w.RawString(",\"logger_name\":") + w.String(v.LoggerName) + } + if v.ParamMessage != "" { + w.RawString(",\"param_message\":") + w.String(v.ParamMessage) + } + if v.Stacktrace != nil { + w.RawString(",\"stacktrace\":") + w.RawByte('[') + for i, v := range v.Stacktrace { + if i != 0 { + w.RawByte(',') + } + if err := v.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + w.RawByte(']') + } + w.RawByte('}') + return firstErr +} + +func (v *Request) MarshalFastJSON(w *fastjson.Writer) error { + var firstErr error + w.RawByte('{') + w.RawString("\"method\":") + w.String(v.Method) + w.RawString(",\"url\":") + if err := v.URL.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + if v.Body != nil { + w.RawString(",\"body\":") + if err := v.Body.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if !v.Cookies.isZero() { + w.RawString(",\"cookies\":") + if err := v.Cookies.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.Env != nil { + w.RawString(",\"env\":") + w.RawByte('{') + { + first := true + for k, v := range v.Env { + if first { + first = false + } else { + w.RawByte(',') + } + w.String(k) + w.RawByte(':') + w.String(v) + } + } + w.RawByte('}') + } + if !v.Headers.isZero() { + w.RawString(",\"headers\":") + if err := v.Headers.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + 
if v.HTTPVersion != "" { + w.RawString(",\"http_version\":") + w.String(v.HTTPVersion) + } + if v.Socket != nil { + w.RawString(",\"socket\":") + if err := v.Socket.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + w.RawByte('}') + return firstErr +} + +func (v *RequestSocket) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + first := true + if v.Encrypted != false { + const prefix = ",\"encrypted\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.Bool(v.Encrypted) + } + if v.RemoteAddress != "" { + const prefix = ",\"remote_address\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.RemoteAddress) + } + w.RawByte('}') + return nil +} + +func (v *Response) MarshalFastJSON(w *fastjson.Writer) error { + var firstErr error + w.RawByte('{') + first := true + if v.Finished != nil { + const prefix = ",\"finished\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.Bool(*v.Finished) + } + if !v.Headers.isZero() { + const prefix = ",\"headers\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + if err := v.Headers.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if v.HeadersSent != nil { + const prefix = ",\"headers_sent\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.Bool(*v.HeadersSent) + } + if v.StatusCode != 0 { + const prefix = ",\"status_code\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.Int64(int64(v.StatusCode)) + } + w.RawByte('}') + return firstErr +} + +func (v *Metrics) MarshalFastJSON(w *fastjson.Writer) error { + var firstErr error + w.RawByte('{') + w.RawString("\"samples\":") + if v.Samples == nil { + w.RawString("null") + } else { + w.RawByte('{') + { + first := true + for k, v := range 
v.Samples { + if first { + first = false + } else { + w.RawByte(',') + } + w.String(k) + w.RawByte(':') + if err := v.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + } + w.RawByte('}') + } + w.RawString(",\"timestamp\":") + if err := v.Timestamp.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + if !v.Span.isZero() { + w.RawString(",\"span\":") + if err := v.Span.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if !v.Labels.isZero() { + w.RawString(",\"tags\":") + if err := v.Labels.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + if !v.Transaction.isZero() { + w.RawString(",\"transaction\":") + if err := v.Transaction.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + } + w.RawByte('}') + return firstErr +} + +func (v *MetricsTransaction) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + first := true + if v.Name != "" { + const prefix = ",\"name\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Name) + } + if v.Type != "" { + const prefix = ",\"type\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Type) + } + w.RawByte('}') + return nil +} + +func (v *MetricsSpan) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + first := true + if v.Subtype != "" { + const prefix = ",\"subtype\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Subtype) + } + if v.Type != "" { + const prefix = ",\"type\":" + if first { + first = false + w.RawString(prefix[1:]) + } else { + w.RawString(prefix) + } + w.String(v.Type) + } + w.RawByte('}') + return nil +} + +func (v *Metric) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + w.RawString("\"value\":") + w.Float64(v.Value) + w.RawByte('}') + return nil +} diff --git 
a/vendor/go.elastic.co/apm/model/model.go b/vendor/go.elastic.co/apm/model/model.go new file mode 100644 index 00000000000..71568d4c0d6 --- /dev/null +++ b/vendor/go.elastic.co/apm/model/model.go @@ -0,0 +1,671 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import ( + "net/http" + "net/url" + "time" +) + +// Service represents the service handling transactions being traced. +type Service struct { + // Name is the immutable name of the service. + Name string `json:"name,omitempty"` + + // Version is the version of the service, if it has one. + Version string `json:"version,omitempty"` + + // Environment is the name of the service's environment, if it has + // one, e.g. "production" or "staging". + Environment string `json:"environment,omitempty"` + + // Agent holds information about the Elastic APM agent tracing this + // service's transactions. + Agent *Agent `json:"agent,omitempty"` + + // Framework holds information about the service's framework, if any. + Framework *Framework `json:"framework,omitempty"` + + // Language holds information about the programming language in which + // the service is written. 
+ Language *Language `json:"language,omitempty"` + + // Runtime holds information about the programming language runtime + // running this service. + Runtime *Runtime `json:"runtime,omitempty"` + + // Node holds unique information about each service node + Node *ServiceNode `json:"node,omitempty"` +} + +// Agent holds information about the Elastic APM agent. +type Agent struct { + // Name is the name of the Elastic APM agent, e.g. "Go". + Name string `json:"name"` + + // Version is the version of the Elastic APM agent, e.g. "1.0.0". + Version string `json:"version"` +} + +// Framework holds information about the framework (typically web) +// used by the service. +type Framework struct { + // Name is the name of the framework. + Name string `json:"name"` + + // Version is the version of the framework. + Version string `json:"version"` +} + +// Language holds information about the programming language used. +type Language struct { + // Name is the name of the programming language. + Name string `json:"name"` + + // Version is the version of the programming language. + Version string `json:"version,omitempty"` +} + +// Runtime holds information about the programming language runtime. +type Runtime struct { + // Name is the name of the programming language runtime. + Name string `json:"name"` + + // Version is the version of the programming language runtime. + Version string `json:"version"` +} + +// ServiceNode holds unique information about each service node +type ServiceNode struct { + // ConfiguredName holds the name of the service node + ConfiguredName string `json:"configured_name,omitempty"` +} + +// System represents the system (operating system and machine) running the +// service. +type System struct { + // Architecture is the system's hardware architecture. + Architecture string `json:"architecture,omitempty"` + + // Hostname is the system's hostname. 
+ Hostname string `json:"hostname,omitempty"` + + // Platform is the system's platform, or operating system name. + Platform string `json:"platform,omitempty"` + + // Container describes the container running the service. + Container *Container `json:"container,omitempty"` + + // Kubernetes describes the kubernetes node and pod running the service. + Kubernetes *Kubernetes `json:"kubernetes,omitempty"` +} + +// Process represents an operating system process. +type Process struct { + // Pid is the process ID. + Pid int `json:"pid"` + + // Ppid is the parent process ID, if known. + Ppid *int `json:"ppid,omitempty"` + + // Title is the title of the process. + Title string `json:"title,omitempty"` + + // Argv holds the command line arguments used to start the process. + Argv []string `json:"argv,omitempty"` +} + +// Container represents the container (e.g. Docker) running the service. +type Container struct { + // ID is the unique container ID. + ID string `json:"id"` +} + +// Kubernetes describes properties of the Kubernetes node and pod in which +// the service is running. +type Kubernetes struct { + // Namespace names the Kubernetes namespace in which the pod exists. + Namespace string `json:"namespace,omitempty"` + + // Node describes the Kubernetes node running the service's pod. + Node *KubernetesNode `json:"node,omitempty"` + + // Pod describes the Kubernetes pod running the service. + Pod *KubernetesPod `json:"pod,omitempty"` +} + +// KubernetesNode describes a Kubernetes node. +type KubernetesNode struct { + // Name holds the node name. + Name string `json:"name,omitempty"` +} + +// KubernetesPod describes a Kubernetes pod. +type KubernetesPod struct { + // Name holds the pod name. + Name string `json:"name,omitempty"` + + // UID holds the pod UID. + UID string `json:"uid,omitempty"` +} + +// Transaction represents a transaction handled by the service. +type Transaction struct { + // ID holds the 64-bit hex-encoded transaction ID. 
+ ID SpanID `json:"id"` + + // TraceID holds the ID of the trace that this transaction is a part of. + TraceID TraceID `json:"trace_id"` + + // ParentID holds the ID of the transaction's parent span or transaction. + ParentID SpanID `json:"parent_id,omitempty"` + + // Name holds the name of the transaction. + Name string `json:"name"` + + // Type identifies the service-domain specific type of the request, + // e.g. "request" or "backgroundjob". + Type string `json:"type"` + + // Timestamp holds the time at which the transaction started. + Timestamp Time `json:"timestamp"` + + // Duration records how long the transaction took to complete, + // in milliseconds. + Duration float64 `json:"duration"` + + // Result holds the result of the transaction, e.g. the status code + // for HTTP requests. + Result string `json:"result,omitempty"` + + // Context holds contextual information relating to the transaction. + Context *Context `json:"context,omitempty"` + + // Sampled indicates that the transaction was sampled, and + // includes all available information. Non-sampled transactions + // omit Context. + // + // If Sampled is unspecified (nil), it is equivalent to setting + // it to true. + Sampled *bool `json:"sampled,omitempty"` + + // SpanCount holds statistics on spans within a transaction. + SpanCount SpanCount `json:"span_count"` +} + +// SpanCount holds statistics on spans within a transaction. +type SpanCount struct { + // Dropped holds the number of spans dropped within a transaction. + // This does not include spans that were started and dropped due + // to full buffers, network errors, etc. + Dropped int `json:"dropped"` + + // Started holds the number of spans started within a transaction. + Started int `json:"started"` +} + +// Span represents a span within a transaction. +type Span struct { + // Name holds the name of the span. + Name string `json:"name"` + + // Timestamp holds the time at which the span started. 
+ Timestamp Time `json:"timestamp"` + + // Duration holds the duration of the span, in milliseconds. + Duration float64 `json:"duration"` + + // Type identifies the overarching type of the span, + // e.g. "db" or "external". + Type string `json:"type"` + + // Subtype identifies the subtype of the span, + // e.g. "mysql" or "http". + Subtype string `json:"subtype,omitempty"` + + // Action identifies the action that is being undertaken, e.g. "query". + Action string `json:"action,omitempty"` + + // ID holds the ID of the span. + ID SpanID `json:"id"` + + // TransactionID holds the ID of the transaction of which the span is a part. + TransactionID SpanID `json:"transaction_id"` + + // TraceID holds the ID of the trace that this span is a part of. + TraceID TraceID `json:"trace_id"` + + // ParentID holds the ID of the span's parent (span or transaction). + ParentID SpanID `json:"parent_id,omitempty"` + + // Context holds contextual information relating to the span. + Context *SpanContext `json:"context,omitempty"` + + // Stacktrace holds stack frames corresponding to the span. + Stacktrace []StacktraceFrame `json:"stacktrace,omitempty"` +} + +// SpanContext holds contextual information relating to the span. +type SpanContext struct { + // Destination holds information about a destination service. + Destination *DestinationSpanContext `json:"destination,omitempty"` + + // Database holds contextual information for database + // operation spans. + Database *DatabaseSpanContext `json:"db,omitempty"` + + // HTTP holds contextual information for HTTP client request spans. + HTTP *HTTPSpanContext `json:"http,omitempty"` + + // Tags holds user-defined key/value pairs. + Tags IfaceMap `json:"tags,omitempty"` +} + +// DestinationSpanContext holds contextual information about the destination +// for a span that relates to an operation involving an external service. +type DestinationSpanContext struct { + // Address holds the network address of the destination service. 
+ // This may be a hostname, FQDN, or (IPv4 or IPv6) network address. + Address string `json:"address,omitempty"` + + // Port holds the network port for the destination service. + Port int `json:"port,omitempty"` + + // Service holds additional destination service context. + Service *DestinationServiceSpanContext `json:"service,omitempty"` +} + +// DestinationServiceSpanContext holds contextual information about a +// destination service,. +type DestinationServiceSpanContext struct { + // Type holds the destination service type. + Type string `json:"type,omitempty"` + + // Name holds the destination service name. + Name string `json:"name,omitempty"` + + // Resource identifies the destination service + // resource, e.g. a URI or message queue name. + Resource string `json:"resource,omitempty"` +} + +// DatabaseSpanContext holds contextual information for database +// operation spans. +type DatabaseSpanContext struct { + // Instance holds the database instance name. + Instance string `json:"instance,omitempty"` + + // Statement holds the database statement (e.g. query). + Statement string `json:"statement,omitempty"` + + // RowsAffected holds the number of rows affected by the + // database operation. + RowsAffected *int64 `json:"rows_affected,omitempty"` + + // Type holds the database type. For any SQL database, + // this should be "sql"; for others, the lower-cased + // database category, e.g. "cassandra", "hbase", "redis". + Type string `json:"type,omitempty"` + + // User holds the username used for database access. + User string `json:"user,omitempty"` +} + +// HTTPSpanContext holds contextual information for HTTP client request spans. +type HTTPSpanContext struct { + // URL is the request URL. + URL *url.URL + + // StatusCode holds the HTTP response status code. + StatusCode int `json:"status_code,omitempty"` +} + +// Context holds contextual information relating to a transaction or error. 
+type Context struct { + // Custom holds custom context relating to the transaction or error. + Custom IfaceMap `json:"custom,omitempty"` + + // Request holds details of the HTTP request relating to the + // transaction or error, if relevant. + Request *Request `json:"request,omitempty"` + + // Response holds details of the HTTP response relating to the + // transaction or error, if relevant. + Response *Response `json:"response,omitempty"` + + // User holds details of the authenticated user relating to the + // transaction or error, if relevant. + User *User `json:"user,omitempty"` + + // Tags holds user-defined key/value pairs. + Tags IfaceMap `json:"tags,omitempty"` + + // Service holds values to overrides service-level metadata. + Service *Service `json:"service,omitempty"` +} + +// User holds information about an authenticated user. +type User struct { + // Username holds the username of the user. + Username string `json:"username,omitempty"` + + // ID identifies the user, e.g. a primary key. This may be + // a string or number. + ID string `json:"id,omitempty"` + + // Email holds the email address of the user. + Email string `json:"email,omitempty"` +} + +// Error represents an error occurring in the service. +type Error struct { + // Timestamp holds the time at which the error occurred. + Timestamp Time `json:"timestamp"` + + // ID holds the 128-bit hex-encoded error ID. + ID TraceID `json:"id"` + + // TraceID holds the ID of the trace within which the error occurred. + TraceID TraceID `json:"trace_id,omitempty"` + + // ParentID holds the ID of the transaction within which the error + // occurred. + ParentID SpanID `json:"parent_id,omitempty"` + + // TransactionID holds the ID of the transaction within which the error occurred. + TransactionID SpanID `json:"transaction_id,omitempty"` + + // Culprit holds the name of the function which + // produced the error. 
+ Culprit string `json:"culprit,omitempty"` + + // Context holds contextual information relating to the error. + Context *Context `json:"context,omitempty"` + + // Exception holds details of the exception (error or panic) + // to which this error relates. + Exception Exception `json:"exception,omitempty"` + + // Log holds additional information added when logging the error. + Log Log `json:"log,omitempty"` + + // Transaction holds information about the transaction within which the error occurred. + Transaction ErrorTransaction `json:"transaction,omitempty"` +} + +// ErrorTransaction holds information about the transaction within which an error occurred. +type ErrorTransaction struct { + // Sampled indicates that the transaction was sampled. + Sampled *bool `json:"sampled,omitempty"` + + // Type holds the transaction type. + Type string `json:"type,omitempty"` +} + +// Exception represents an exception: an error or panic. +type Exception struct { + // Message holds the error message. + Message string `json:"message"` + + // Code holds the error code. This may be a number or a string. + Code ExceptionCode `json:"code,omitempty"` + + // Type holds the type of the exception. + Type string `json:"type,omitempty"` + + // Module holds the exception type's module namespace. + Module string `json:"module,omitempty"` + + // Attributes holds arbitrary exception-type specific attributes. + Attributes map[string]interface{} `json:"attributes,omitempty"` + + // Stacktrace holds stack frames corresponding to the exception. + Stacktrace []StacktraceFrame `json:"stacktrace,omitempty"` + + // Handled indicates whether or not the error was caught and handled. + Handled bool `json:"handled"` + + // Cause holds the causes of this error. + Cause []Exception `json:"cause,omitempty"` +} + +// ExceptionCode represents an exception code as either a number or a string. +type ExceptionCode struct { + String string + Number float64 +} + +// StacktraceFrame describes a stack frame. 
+type StacktraceFrame struct { + // AbsolutePath holds the absolute path of the source file for the + // stack frame. + AbsolutePath string `json:"abs_path,omitempty"` + + // File holds the base filename of the source file for the stack frame. + File string `json:"filename"` + + // Line holds the line number of the source for the stack frame. + Line int `json:"lineno"` + + // Column holds the column number of the source for the stack frame. + Column *int `json:"colno,omitempty"` + + // Module holds the module to which the frame belongs. For Go, we + // use the package path (e.g. "net/http"). + Module string `json:"module,omitempty"` + + // Function holds the name of the function to which the frame belongs. + Function string `json:"function,omitempty"` + + // LibraryFrame indicates whether or not the frame corresponds to + // library or user code. + LibraryFrame bool `json:"library_frame,omitempty"` + + // ContextLine holds the line of source code to which the frame + // corresponds. + ContextLine string `json:"context_line,omitempty"` + + // PreContext holds zero or more lines of source code preceding the + // line corresponding to the frame. + PreContext []string `json:"pre_context,omitempty"` + + // PostContext holds zero or more lines of source code proceeding the + // line corresponding to the frame. + PostContext []string `json:"post_context,omitempty"` + + // Vars holds local variables for this stack frame. + Vars map[string]interface{} `json:"vars,omitempty"` +} + +// Log holds additional information added when logging an error. +type Log struct { + // Message holds the logged error message. + Message string `json:"message"` + + // Level holds the severity of the log record. + Level string `json:"level,omitempty"` + + // LoggerName holds the name of the logger used. + LoggerName string `json:"logger_name,omitempty"` + + // ParamMessage holds a parameterized message, e.g. + // "Could not connect to %s". 
The string is not interpreted, + // but may be used for grouping errors. + ParamMessage string `json:"param_message,omitempty"` + + // Stacktrace holds stack frames corresponding to the error. + Stacktrace []StacktraceFrame `json:"stacktrace,omitempty"` +} + +// Request represents an HTTP request. +type Request struct { + // URL is the request URL. + URL URL `json:"url"` + + // Method holds the HTTP request method. + Method string `json:"method"` + + // Headers holds the request headers. + Headers Headers `json:"headers,omitempty"` + + // Body holds the request body, if body capture is enabled. + Body *RequestBody `json:"body,omitempty"` + + // HTTPVersion holds the HTTP version of the request. + HTTPVersion string `json:"http_version,omitempty"` + + // Cookies holds the parsed cookies. + Cookies Cookies `json:"cookies,omitempty"` + + // Env holds environment information passed from the + // web framework to the request handler. + Env map[string]string `json:"env,omitempty"` + + // Socket holds transport-level information. + Socket *RequestSocket `json:"socket,omitempty"` +} + +// Cookies holds a collection of HTTP cookies. +type Cookies []*http.Cookie + +// RequestBody holds a request body. +// +// Exactly one of Raw or Form must be set. +type RequestBody struct { + // Raw holds the raw body content. + Raw string + + // Form holds the form data from POST, PATCH, or PUT body parameters. + Form url.Values +} + +// Headers holds a collection of HTTP headers. +type Headers []Header + +// Header holds an HTTP header, with one or more values. +type Header struct { + Key string + Values []string +} + +// RequestSocket holds transport-level information relating to an HTTP request. +type RequestSocket struct { + // Encrypted indicates whether or not the request was sent + // as an SSL/HTTPS request. + Encrypted bool `json:"encrypted,omitempty"` + + // RemoteAddress holds the remote address for the request. 
+ RemoteAddress string `json:"remote_address,omitempty"` +} + +// URL represents a server-side (transaction) request URL, +// broken down into its constituent parts. +type URL struct { + // Full is the full URL, e.g. + // "https://example.com:443/search/?q=elasticsearch#top". + Full string `json:"full,omitempty"` + + // Protocol is the scheme of the URL, e.g. "https". + Protocol string `json:"protocol,omitempty"` + + // Hostname is the hostname for the URL, e.g. "example.com". + Hostname string `json:"hostname,omitempty"` + + // Port is the port number in the URL, e.g. "443". + Port string `json:"port,omitempty"` + + // Path is the path of the URL, e.g. "/search". + Path string `json:"pathname,omitempty"` + + // Search is the query string of the URL, e.g. "q=elasticsearch". + Search string `json:"search,omitempty"` + + // Hash is the fragment for references, e.g. "top" in the + // URL example provided for Full. + Hash string `json:"hash,omitempty"` +} + +// Response represents an HTTP response. +type Response struct { + // StatusCode holds the HTTP response status code. + StatusCode int `json:"status_code,omitempty"` + + // Headers holds the response headers. + Headers Headers `json:"headers,omitempty"` + + // HeadersSent indicates whether or not headers were sent + // to the client. + HeadersSent *bool `json:"headers_sent,omitempty"` + + // Finished indicates whether or not the response was finished. + Finished *bool `json:"finished,omitempty"` +} + +// Time is a timestamp, formatted as a number of microseconds since January 1, 1970 UTC. +type Time time.Time + +// TraceID holds a 128-bit trace ID. +type TraceID [16]byte + +// SpanID holds a 64-bit span ID. Despite its name, this is used for +// both spans and transactions. +type SpanID [8]byte + +// Metrics holds a set of metric samples, with an optional set of labels. +type Metrics struct { + // Timestamp holds the time at which the metric samples were taken. 
+ Timestamp Time `json:"timestamp"` + + // Transaction optionally holds the name and type of transactions + // with which these metrics are associated. + Transaction MetricsTransaction `json:"transaction,omitempty"` + + // Span optionally holds the type and subtype of the spans with + // which these metrics are associated. + Span MetricsSpan `json:"span,omitempty"` + + // Labels holds a set of labels associated with the metrics. + // The labels apply uniformly to all metric samples in the set. + // + // NOTE(axw) the schema calls the field "tags", but we use + // "labels" for agent-internal consistency. Labels aligns better + // with the common schema, anyway. + Labels StringMap `json:"tags,omitempty"` + + // Samples holds a map of metric samples, keyed by metric name. + Samples map[string]Metric `json:"samples"` +} + +// MetricsTransaction holds transaction identifiers for metrics. +type MetricsTransaction struct { + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` +} + +// MetricsSpan holds span identifiers for metrics. +type MetricsSpan struct { + Type string `json:"type,omitempty"` + Subtype string `json:"subtype,omitempty"` +} + +// Metric holds metric values. +type Metric struct { + // Value holds the metric value. + Value float64 `json:"value"` +} diff --git a/vendor/go.elastic.co/apm/modelwriter.go b/vendor/go.elastic.co/apm/modelwriter.go new file mode 100644 index 00000000000..e78d9be8f50 --- /dev/null +++ b/vendor/go.elastic.co/apm/modelwriter.go @@ -0,0 +1,267 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +import ( + "go.elastic.co/apm/internal/ringbuffer" + "go.elastic.co/apm/model" + "go.elastic.co/apm/stacktrace" + "go.elastic.co/fastjson" +) + +const ( + transactionBlockTag ringbuffer.BlockTag = iota + 1 + spanBlockTag + errorBlockTag + metricsBlockTag +) + +// notSampled is used as the pointee for the model.Transaction.Sampled field +// of non-sampled transactions. +var notSampled = false + +type modelWriter struct { + buffer *ringbuffer.Buffer + metricsBuffer *ringbuffer.Buffer + cfg *tracerConfig + stats *TracerStats + json fastjson.Writer + modelStacktrace []model.StacktraceFrame +} + +// writeTransaction encodes tx as JSON to the buffer, and then resets tx. +func (w *modelWriter) writeTransaction(tx *Transaction, td *TransactionData) { + var modelTx model.Transaction + w.buildModelTransaction(&modelTx, tx, td) + w.json.RawString(`{"transaction":`) + modelTx.MarshalFastJSON(&w.json) + w.json.RawByte('}') + w.buffer.WriteBlock(w.json.Bytes(), transactionBlockTag) + w.json.Reset() + td.reset(tx.tracer) +} + +// writeSpan encodes s as JSON to the buffer, and then resets s. +func (w *modelWriter) writeSpan(s *Span, sd *SpanData) { + var modelSpan model.Span + w.buildModelSpan(&modelSpan, s, sd) + w.json.RawString(`{"span":`) + modelSpan.MarshalFastJSON(&w.json) + w.json.RawByte('}') + w.buffer.WriteBlock(w.json.Bytes(), spanBlockTag) + w.json.Reset() + sd.reset(s.tracer) +} + +// writeError encodes e as JSON to the buffer, and then resets e. 
+func (w *modelWriter) writeError(e *ErrorData) { + var modelError model.Error + w.buildModelError(&modelError, e) + w.json.RawString(`{"error":`) + modelError.MarshalFastJSON(&w.json) + w.json.RawByte('}') + w.buffer.WriteBlock(w.json.Bytes(), errorBlockTag) + w.json.Reset() + e.reset() +} + +// writeMetrics encodes m as JSON to the w.metricsBuffer, and then resets m. +// +// Note that we do not write metrics to the main ring buffer (w.buffer), as +// periodic metrics would be evicted by transactions/spans in a busy system. +func (w *modelWriter) writeMetrics(m *Metrics) { + for _, m := range m.transactionGroupMetrics { + w.json.RawString(`{"metricset":`) + m.MarshalFastJSON(&w.json) + w.json.RawString("}") + w.metricsBuffer.WriteBlock(w.json.Bytes(), metricsBlockTag) + w.json.Reset() + } + for _, m := range m.metrics { + w.json.RawString(`{"metricset":`) + m.MarshalFastJSON(&w.json) + w.json.RawString("}") + w.metricsBuffer.WriteBlock(w.json.Bytes(), metricsBlockTag) + w.json.Reset() + } + m.reset() +} + +func (w *modelWriter) buildModelTransaction(out *model.Transaction, tx *Transaction, td *TransactionData) { + out.ID = model.SpanID(tx.traceContext.Span) + out.TraceID = model.TraceID(tx.traceContext.Trace) + sampled := tx.traceContext.Options.Recorded() + if !sampled { + out.Sampled = ¬Sampled + } + + out.ParentID = model.SpanID(td.parentSpan) + out.Name = truncateString(td.Name) + out.Type = truncateString(td.Type) + out.Result = truncateString(td.Result) + out.Timestamp = model.Time(td.timestamp.UTC()) + out.Duration = td.Duration.Seconds() * 1000 + out.SpanCount.Started = td.spansCreated + out.SpanCount.Dropped = td.spansDropped + if sampled { + out.Context = td.Context.build() + } + + if len(w.cfg.sanitizedFieldNames) != 0 && out.Context != nil { + if out.Context.Request != nil { + sanitizeRequest(out.Context.Request, w.cfg.sanitizedFieldNames) + } + if out.Context.Response != nil { + sanitizeResponse(out.Context.Response, w.cfg.sanitizedFieldNames) + } + } 
+} + +func (w *modelWriter) buildModelSpan(out *model.Span, span *Span, sd *SpanData) { + w.modelStacktrace = w.modelStacktrace[:0] + out.ID = model.SpanID(span.traceContext.Span) + out.TraceID = model.TraceID(span.traceContext.Trace) + out.TransactionID = model.SpanID(span.transactionID) + + out.ParentID = model.SpanID(sd.parentID) + out.Name = truncateString(sd.Name) + out.Type = truncateString(sd.Type) + out.Subtype = truncateString(sd.Subtype) + out.Action = truncateString(sd.Action) + out.Timestamp = model.Time(sd.timestamp.UTC()) + out.Duration = sd.Duration.Seconds() * 1000 + out.Context = sd.Context.build() + + // Copy the span type to context.destination.service.type. + if out.Context != nil && out.Context.Destination != nil && out.Context.Destination.Service != nil { + out.Context.Destination.Service.Type = out.Type + } + + w.modelStacktrace = appendModelStacktraceFrames(w.modelStacktrace, sd.stacktrace) + out.Stacktrace = w.modelStacktrace + w.setStacktraceContext(out.Stacktrace) +} + +func (w *modelWriter) buildModelError(out *model.Error, e *ErrorData) { + out.ID = model.TraceID(e.ID) + out.TraceID = model.TraceID(e.TraceID) + out.ParentID = model.SpanID(e.ParentID) + out.TransactionID = model.SpanID(e.TransactionID) + out.Timestamp = model.Time(e.Timestamp.UTC()) + out.Context = e.Context.build() + out.Culprit = e.Culprit + + if !e.TransactionID.isZero() { + out.Transaction.Sampled = &e.transactionSampled + if e.transactionSampled { + out.Transaction.Type = e.transactionType + } + } + + // Create model stacktrace frames, and set the context. 
+ w.modelStacktrace = w.modelStacktrace[:0] + var appendModelErrorStacktraceFrames func(exception *exceptionData) + appendModelErrorStacktraceFrames = func(exception *exceptionData) { + if len(exception.stacktrace) != 0 { + w.modelStacktrace = appendModelStacktraceFrames(w.modelStacktrace, exception.stacktrace) + } + for _, cause := range exception.cause { + appendModelErrorStacktraceFrames(&cause) + } + } + appendModelErrorStacktraceFrames(&e.exception) + if len(e.logStacktrace) != 0 { + w.modelStacktrace = appendModelStacktraceFrames(w.modelStacktrace, e.logStacktrace) + } + w.setStacktraceContext(w.modelStacktrace) + + var modelStacktraceOffset int + if e.exception.message != "" { + var buildException func(exception *exceptionData) model.Exception + culprit := e.Culprit + buildException = func(exception *exceptionData) model.Exception { + out := model.Exception{ + Message: exception.message, + Code: model.ExceptionCode{ + String: exception.Code.String, + Number: exception.Code.Number, + }, + Type: exception.Type.Name, + Module: exception.Type.PackagePath, + Handled: e.Handled, + } + if n := len(exception.stacktrace); n != 0 { + out.Stacktrace = w.modelStacktrace[modelStacktraceOffset : modelStacktraceOffset+n] + modelStacktraceOffset += n + } + if len(exception.attrs) != 0 { + out.Attributes = exception.attrs + } + if n := len(exception.cause); n > 0 { + out.Cause = make([]model.Exception, n) + for i := range exception.cause { + out.Cause[i] = buildException(&exception.cause[i]) + } + } + if culprit == "" { + culprit = stacktraceCulprit(out.Stacktrace) + } + return out + } + out.Exception = buildException(&e.exception) + out.Culprit = culprit + } + if e.log.Message != "" { + out.Log = model.Log{ + Message: e.log.Message, + Level: e.log.Level, + LoggerName: e.log.LoggerName, + ParamMessage: e.log.MessageFormat, + } + if n := len(e.logStacktrace); n != 0 { + out.Log.Stacktrace = w.modelStacktrace[modelStacktraceOffset : modelStacktraceOffset+n] + 
modelStacktraceOffset += n + if out.Culprit == "" { + out.Culprit = stacktraceCulprit(out.Log.Stacktrace) + } + } + } + out.Culprit = truncateString(out.Culprit) +} + +func stacktraceCulprit(frames []model.StacktraceFrame) string { + for _, frame := range frames { + if !frame.LibraryFrame { + return frame.Function + } + } + return "" +} + +func (w *modelWriter) setStacktraceContext(stack []model.StacktraceFrame) { + if w.cfg.contextSetter == nil || len(stack) == 0 { + return + } + err := stacktrace.SetContext(w.cfg.contextSetter, stack, w.cfg.preContext, w.cfg.postContext) + if err != nil { + if w.cfg.logger != nil { + w.cfg.logger.Debugf("setting context failed: %v", err) + } + w.stats.Errors.SetContext++ + } +} diff --git a/vendor/go.elastic.co/apm/module/apmelasticsearch/LICENSE b/vendor/go.elastic.co/apm/module/apmelasticsearch/LICENSE new file mode 100644 index 00000000000..b1a731fb5a3 --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmelasticsearch/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2018 Elasticsearch BV + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.elastic.co/apm/module/apmelasticsearch/client.go b/vendor/go.elastic.co/apm/module/apmelasticsearch/client.go new file mode 100644 index 00000000000..9222c732e8b --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmelasticsearch/client.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package apmelasticsearch + +import ( + "bytes" + "compress/gzip" + "io" + "io/ioutil" + "net/http" + "net/url" + "path" + "sync/atomic" + "unsafe" + + "go.elastic.co/apm" + "go.elastic.co/apm/module/apmhttp" +) + +// WrapRoundTripper returns an http.RoundTripper wrapping r, reporting each +// request as a span to Elastic APM, if the request's context contains a +// sampled transaction. +// +// If r is nil, then http.DefaultTransport is wrapped. +func WrapRoundTripper(r http.RoundTripper, o ...ClientOption) http.RoundTripper { + if r == nil { + r = http.DefaultTransport + } + rt := &roundTripper{r: r} + for _, o := range o { + o(rt) + } + return rt +} + +type roundTripper struct { + r http.RoundTripper +} + +// RoundTrip delegates to r.r, emitting a span if req's context contains a transaction. +// +// If req.URL.Path corresponds to a search request, then RoundTrip will attempt to extract +// the search query to use as the span context's "database statement". If the query is +// passed in as a query parameter (i.e. "/_search?q=foo:bar"), then that will be used; +// otherwise, the request body will be read. In the latter case, req.GetBody is used +// if defined, otherwise we read req.Body, preserving its contents for the underlying +// RoundTripper. If the request body is gzip-encoded, it will be decoded. 
+func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	ctx := req.Context()
+	tx := apm.TransactionFromContext(ctx)
+	if tx == nil || !tx.Sampled() {
+		return r.r.RoundTrip(req)
+	}
+
+	name := requestName(req)
+	span := tx.StartSpan(name, "db.elasticsearch", apm.SpanFromContext(ctx))
+	if span.Dropped() {
+		span.End()
+		return r.r.RoundTrip(req)
+	}
+
+	statement, req := captureSearchStatement(req)
+	username, _, _ := req.BasicAuth()
+	ctx = apm.ContextWithSpan(ctx, span)
+	req = apmhttp.RequestWithContext(ctx, req)
+	span.Context.SetHTTPRequest(req)
+	span.Context.SetDestinationService(apm.DestinationServiceSpanContext{
+		Name:     "elasticsearch",
+		Resource: "elasticsearch",
+	})
+	span.Context.SetDatabase(apm.DatabaseSpanContext{
+		Type:      "elasticsearch",
+		Statement: statement,
+		User:      username,
+	})
+
+	resp, err := r.r.RoundTrip(req)
+	if err != nil {
+		span.End()
+	} else {
+		span.Context.SetHTTPStatusCode(resp.StatusCode)
+		resp.Body = &responseBody{span: span, body: resp.Body}
+	}
+	return resp, err
+}
+
+type responseBody struct {
+	span *apm.Span
+	body io.ReadCloser
+}
+
+// Close closes the response body, and ends the span if it hasn't already been ended.
+func (b *responseBody) Close() error {
+	b.endSpan()
+	return b.body.Close()
+}
+
+// Read reads from the response body, and ends the span when io.EOF is returned if
+// the span hasn't already been ended.
+func (b *responseBody) Read(p []byte) (n int, err error) {
+	n, err = b.body.Read(p)
+	if err == io.EOF {
+		b.endSpan()
+	}
+	return n, err
+}
+
+func (b *responseBody) endSpan() {
+	addr := (*unsafe.Pointer)(unsafe.Pointer(&b.span))
+	if old := atomic.SwapPointer(addr, nil); old != nil {
+		(*apm.Span)(old).End()
+	}
+}
+
+// ClientOption sets options for tracing client requests.
+type ClientOption func(*roundTripper)
+
+// captureSearchStatement captures the search URI query or request body.
+//
+// If the request must be modified (i.e. 
because the body must be read), +// then captureSearchStatement returns a new *http.Request to be passed +// to the underlying http.RoundTripper. Otherwise, req is returned. +func captureSearchStatement(req *http.Request) (string, *http.Request) { + if !isSearchURL(req.URL) { + return "", req + } + + // If "q" is in query params, use that for statement. + if req.URL.RawQuery != "" { + query := req.URL.Query() + if statement := query.Get("q"); statement != "" { + return statement, req + } + } + if req.Body == nil || req.Body == http.NoBody { + return "", req + } + + var bodyBuf bytes.Buffer + if req.GetBody != nil { + // req.GetBody is defined, so we can read a copy of the + // request body instead of messing with the original request + // body. + body, err := req.GetBody() + if err != nil { + return "", req + } + if _, err := bodyBuf.ReadFrom(limitedBody(body, req.ContentLength)); err != nil { + body.Close() + return "", req + } + if err := body.Close(); err != nil { + return "", req + } + } else { + type readCloser struct { + io.Reader + io.Closer + } + newBody := &readCloser{Closer: req.Body} + reqCopy := *req + reqCopy.Body = newBody + if _, err := bodyBuf.ReadFrom(limitedBody(req.Body, req.ContentLength)); err != nil { + // Continue with the request, ensuring that req.Body returns + // the same content and error, but don't use the consumed body + // for the statement. 
+ newBody.Reader = io.MultiReader(bytes.NewReader(bodyBuf.Bytes()), errorReader{err: err}) + return "", &reqCopy + } + newBody.Reader = io.MultiReader(bytes.NewReader(bodyBuf.Bytes()), req.Body) + req = &reqCopy + } + + var statement string + if req.Header.Get("Content-Encoding") == "gzip" { + if r, err := gzip.NewReader(&bodyBuf); err == nil { + if content, err := ioutil.ReadAll(r); err == nil { + statement = string(content) + } + } + } else { + statement = bodyBuf.String() + } + return statement, req +} + +func isSearchURL(url *url.URL) bool { + switch dir, file := path.Split(url.Path); file { + case "_search", "_msearch", "_rollup_search": + return true + case "template": + if dir == "" { + return false + } + switch _, file := path.Split(dir[:len(dir)-1]); file { + case "_search", "_msearch": + // ".../_search/template" or ".../_msearch/template" + return true + } + } + return false +} + +func limitedBody(r io.Reader, n int64) io.Reader { + // maxLimit is the maximum size of the request body that we'll read, + // set to 10000 to match the maximum length of the "db.statement" + // span context field. + const maxLimit = 10000 + if n <= 0 { + return r + } + if n > maxLimit { + n = maxLimit + } + return &io.LimitedReader{R: r, N: n} +} + +type errorReader struct { + err error +} + +func (r errorReader) Read(p []byte) (int, error) { + return 0, r.err +} diff --git a/vendor/go.elastic.co/apm/module/apmelasticsearch/doc.go b/vendor/go.elastic.co/apm/module/apmelasticsearch/doc.go new file mode 100644 index 00000000000..066dd0b9030 --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmelasticsearch/doc.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package apmelasticsearch provides support for tracing the +// HTTP transport layer of Elasticsearch clients. +package apmelasticsearch diff --git a/vendor/go.elastic.co/apm/module/apmelasticsearch/go.mod b/vendor/go.elastic.co/apm/module/apmelasticsearch/go.mod new file mode 100644 index 00000000000..4df7d8d7d9b --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmelasticsearch/go.mod @@ -0,0 +1,14 @@ +module go.elastic.co/apm/module/apmelasticsearch + +require ( + github.com/stretchr/testify v1.4.0 + go.elastic.co/apm v1.7.2 + go.elastic.co/apm/module/apmhttp v1.7.2 + golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 +) + +replace go.elastic.co/apm => ../.. 
+ +replace go.elastic.co/apm/module/apmhttp => ../apmhttp + +go 1.13 diff --git a/vendor/go.elastic.co/apm/module/apmelasticsearch/go.sum b/vendor/go.elastic.co/apm/module/apmelasticsearch/go.sum new file mode 100644 index 00000000000..1976184453d --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmelasticsearch/go.sum @@ -0,0 +1,62 @@ +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/cucumber/godog v0.8.1 h1:lVb+X41I4YDreE+ibZ50bdXmySxgRviYFgKY6Aw4XE8= +github.com/cucumber/godog v0.8.1/go.mod h1:vSh3r/lM+psC1BPXvdkSEuNjmXfpVqrMGYAElF6hxnA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/elastic/go-sysinfo v1.1.1 h1:ZVlaLDyhVkDfjwPGU55CQRCRolNpc7P0BbyhhQZQmMI= +github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= +github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= 
+github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/santhosh-tekuri/jsonschema v1.2.4 h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis= +github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 
h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +go.elastic.co/fastjson v1.0.0 h1:ooXV/ABvf+tBul26jcVViPT3sBir0PvXgibYB1IQQzg= +go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= +howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= diff --git a/vendor/go.elastic.co/apm/module/apmelasticsearch/requestname.go b/vendor/go.elastic.co/apm/module/apmelasticsearch/requestname.go new file mode 100644 index 00000000000..5dbb7d7be5d --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmelasticsearch/requestname.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// +build go1.10 + +package apmelasticsearch + +import ( + "net/http" + "strings" +) + +func requestName(req *http.Request) string { + const prefix = "Elasticsearch:" + path := strings.TrimLeft(req.URL.Path, "/") + + var b strings.Builder + b.Grow(len(prefix) + 1 + len(req.Method) + 1 + len(path)) + b.WriteString(prefix) + b.WriteRune(' ') + b.WriteString(req.Method) + b.WriteRune(' ') + b.WriteString(path) + return b.String() +} diff --git a/vendor/go.elastic.co/apm/module/apmelasticsearch/requestname_go19.go b/vendor/go.elastic.co/apm/module/apmelasticsearch/requestname_go19.go new file mode 100644 index 00000000000..14c3bc697bb --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmelasticsearch/requestname_go19.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// +build !go1.10 + +package apmelasticsearch + +import ( + "fmt" + "net/http" + "strings" +) + +func requestName(req *http.Request) string { + return fmt.Sprintf("Elasticsearch: %s %s", req.Method, strings.TrimLeft(req.URL.Path, "/")) +} diff --git a/vendor/go.elastic.co/apm/module/apmhttp/LICENSE b/vendor/go.elastic.co/apm/module/apmhttp/LICENSE new file mode 100644 index 00000000000..b1a731fb5a3 --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmhttp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2018 Elasticsearch BV + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.elastic.co/apm/module/apmhttp/client.go b/vendor/go.elastic.co/apm/module/apmhttp/client.go new file mode 100644 index 00000000000..2d0df4034c7 --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmhttp/client.go @@ -0,0 +1,200 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmhttp + +import ( + "io" + "net/http" + "sync/atomic" + "unsafe" + + "go.elastic.co/apm" +) + +// WrapClient returns a new *http.Client with all fields copied +// across, and the Transport field wrapped with WrapRoundTripper +// such that client requests are reported as spans to Elastic APM +// if their context contains a sampled transaction. 
+// +// Spans are started just before the request is sent, and ended +// immediately if the request returned an error (e.g. due to socket +// timeout, but not a valid response with a non-200 status code), +// or otherwise when the response body is fully consumed or closed. +// +// If c is nil, then http.DefaultClient is wrapped. +func WrapClient(c *http.Client, o ...ClientOption) *http.Client { + if c == nil { + c = http.DefaultClient + } + copied := *c + copied.Transport = WrapRoundTripper(copied.Transport, o...) + return &copied +} + +// WrapRoundTripper returns an http.RoundTripper wrapping r, reporting each +// request as a span to Elastic APM, if the request's context contains a +// sampled transaction. +// +// If r is nil, then http.DefaultTransport is wrapped. +func WrapRoundTripper(r http.RoundTripper, o ...ClientOption) http.RoundTripper { + if r == nil { + r = http.DefaultTransport + } + rt := &roundTripper{ + r: r, + requestName: ClientRequestName, + requestIgnorer: IgnoreNone, + } + for _, o := range o { + o(rt) + } + return rt +} + +type roundTripper struct { + r http.RoundTripper + requestName RequestNameFunc + requestIgnorer RequestIgnorerFunc +} + +// RoundTrip delegates to r.r, emitting a span if req's context +// contains a transaction. +func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if r.requestIgnorer(req) { + return r.r.RoundTrip(req) + } + ctx := req.Context() + tx := apm.TransactionFromContext(ctx) + if tx == nil { + return r.r.RoundTrip(req) + } + + // RoundTrip is not supposed to mutate req, so copy req + // and set the trace-context headers only in the copy. 
+ reqCopy := *req + reqCopy.Header = make(http.Header, len(req.Header)) + for k, v := range req.Header { + reqCopy.Header[k] = v + } + req = &reqCopy + + propagateLegacyHeader := tx.ShouldPropagateLegacyHeader() + traceContext := tx.TraceContext() + if !traceContext.Options.Recorded() { + r.setHeaders(req, traceContext, propagateLegacyHeader) + return r.r.RoundTrip(req) + } + + name := r.requestName(req) + span := tx.StartSpan(name, "external.http", apm.SpanFromContext(ctx)) + if !span.Dropped() { + traceContext = span.TraceContext() + ctx = apm.ContextWithSpan(ctx, span) + req = RequestWithContext(ctx, req) + span.Context.SetHTTPRequest(req) + } else { + span.End() + span = nil + } + + r.setHeaders(req, traceContext, propagateLegacyHeader) + resp, err := r.r.RoundTrip(req) + if span != nil { + if err != nil { + span.End() + } else { + span.Context.SetHTTPStatusCode(resp.StatusCode) + resp.Body = &responseBody{span: span, body: resp.Body} + } + } + return resp, err +} + +func (r *roundTripper) setHeaders(req *http.Request, traceContext apm.TraceContext, propagateLegacyHeader bool) { + headerValue := FormatTraceparentHeader(traceContext) + if propagateLegacyHeader { + req.Header.Set(ElasticTraceparentHeader, headerValue) + } + req.Header.Set(W3CTraceparentHeader, headerValue) + if tracestate := traceContext.State.String(); tracestate != "" { + req.Header.Set(TracestateHeader, tracestate) + } +} + +// CloseIdleConnections calls r.r.CloseIdleConnections if the method exists. +func (r *roundTripper) CloseIdleConnections() { + type closeIdler interface { + CloseIdleConnections() + } + if r, ok := r.r.(closeIdler); ok { + r.CloseIdleConnections() + } +} + +// CancelRequest calls r.r.CancelRequest(req) if the method exists. 
+func (r *roundTripper) CancelRequest(req *http.Request) { + type cancelRequester interface { + CancelRequest(*http.Request) + } + if r, ok := r.r.(cancelRequester); ok { + r.CancelRequest(req) + } +} + +type responseBody struct { + span *apm.Span + body io.ReadCloser +} + +// Close closes the response body, and ends the span if it hasn't already been ended. +func (b *responseBody) Close() error { + b.endSpan() + return b.body.Close() +} + +// Read reads from the response body, and ends the span when io.EOF is returend if +// the span hasn't already been ended. +func (b *responseBody) Read(p []byte) (n int, err error) { + n, err = b.body.Read(p) + if err == io.EOF { + b.endSpan() + } + return n, err +} + +func (b *responseBody) endSpan() { + addr := (*unsafe.Pointer)(unsafe.Pointer(&b.span)) + if old := atomic.SwapPointer(addr, nil); old != nil { + (*apm.Span)(old).End() + } +} + +// ClientOption sets options for tracing client requests. +type ClientOption func(*roundTripper) + +// WithClientRequestName returns a ClientOption which sets r as the function +// to use to obtain the span name for the given http request. +func WithClientRequestName(r RequestNameFunc) ClientOption { + if r == nil { + panic("r == nil") + } + + return ClientOption(func(rt *roundTripper) { + rt.requestName = r + }) +} diff --git a/vendor/go.elastic.co/apm/module/apmhttp/context.go b/vendor/go.elastic.co/apm/module/apmhttp/context.go new file mode 100644 index 00000000000..00c450eba08 --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmhttp/context.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmhttp + +import ( + "fmt" +) + +var standardStatusCodeResults = [...]string{ + "HTTP 1xx", + "HTTP 2xx", + "HTTP 3xx", + "HTTP 4xx", + "HTTP 5xx", +} + +// StatusCodeResult returns the transaction result value to use for the given +// status code. +func StatusCodeResult(statusCode int) string { + switch i := statusCode / 100; i { + case 1, 2, 3, 4, 5: + return standardStatusCodeResults[i-1] + } + return fmt.Sprintf("HTTP %d", statusCode) +} diff --git a/vendor/go.elastic.co/apm/module/apmhttp/doc.go b/vendor/go.elastic.co/apm/module/apmhttp/doc.go new file mode 100644 index 00000000000..659281badcd --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmhttp/doc.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Package apmhttp provides a tracing middleware http.Handler for +// servers, and a tracing http.RoundTripper for clients. +package apmhttp diff --git a/vendor/go.elastic.co/apm/module/apmhttp/go.mod b/vendor/go.elastic.co/apm/module/apmhttp/go.mod new file mode 100644 index 00000000000..70240cdd353 --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmhttp/go.mod @@ -0,0 +1,13 @@ +module go.elastic.co/apm/module/apmhttp + +require ( + github.com/pkg/errors v0.8.1 + github.com/stretchr/testify v1.4.0 + go.elastic.co/apm v1.7.2 + golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 + golang.org/x/text v0.3.2 // indirect +) + +replace go.elastic.co/apm => ../.. + +go 1.13 diff --git a/vendor/go.elastic.co/apm/module/apmhttp/go.sum b/vendor/go.elastic.co/apm/module/apmhttp/go.sum new file mode 100644 index 00000000000..1976184453d --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmhttp/go.sum @@ -0,0 +1,62 @@ +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/cucumber/godog v0.8.1 h1:lVb+X41I4YDreE+ibZ50bdXmySxgRviYFgKY6Aw4XE8= +github.com/cucumber/godog v0.8.1/go.mod h1:vSh3r/lM+psC1BPXvdkSEuNjmXfpVqrMGYAElF6hxnA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/elastic/go-sysinfo v1.1.1 h1:ZVlaLDyhVkDfjwPGU55CQRCRolNpc7P0BbyhhQZQmMI= +github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= +github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= 
+github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= +github.com/prometheus/procfs v0.0.3/go.mod 
h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/santhosh-tekuri/jsonschema v1.2.4 h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis= +github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +go.elastic.co/fastjson v1.0.0 h1:ooXV/ABvf+tBul26jcVViPT3sBir0PvXgibYB1IQQzg= +go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= 
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= +howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= diff --git a/vendor/go.elastic.co/apm/module/apmhttp/handler.go b/vendor/go.elastic.co/apm/module/apmhttp/handler.go new file mode 100644 index 00000000000..8aecf24ba48 --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmhttp/handler.go @@ -0,0 +1,330 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmhttp + +import ( + "context" + "net/http" + + "go.elastic.co/apm" +) + +// Wrap returns an http.Handler wrapping h, reporting each request as +// a transaction to Elastic APM. +// +// By default, the returned Handler will use apm.DefaultTracer. +// Use WithTracer to specify an alternative tracer. +// +// By default, the returned Handler will recover panics, reporting +// them to the configured tracer. To override this behaviour, use +// WithRecovery. +func Wrap(h http.Handler, o ...ServerOption) http.Handler { + if h == nil { + panic("h == nil") + } + handler := &handler{ + handler: h, + tracer: apm.DefaultTracer, + requestName: ServerRequestName, + requestIgnorer: DefaultServerRequestIgnorer(), + } + for _, o := range o { + o(handler) + } + if handler.recovery == nil { + handler.recovery = NewTraceRecovery(handler.tracer) + } + return handler +} + +// handler wraps an http.Handler, reporting a new transaction for each request. +// +// The http.Request's context will be updated with the transaction. +type handler struct { + handler http.Handler + tracer *apm.Tracer + recovery RecoveryFunc + panicPropagation bool + requestName RequestNameFunc + requestIgnorer RequestIgnorerFunc +} + +// ServeHTTP delegates to h.Handler, tracing the transaction with +// h.Tracer, or apm.DefaultTracer if h.Tracer is nil. 
+func (h *handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if !h.tracer.Active() || h.requestIgnorer(req) { + h.handler.ServeHTTP(w, req) + return + } + tx, req := StartTransaction(h.tracer, h.requestName(req), req) + defer tx.End() + + body := h.tracer.CaptureHTTPRequestBody(req) + w, resp := WrapResponseWriter(w) + defer func() { + if v := recover(); v != nil { + if h.panicPropagation { + defer panic(v) + // 500 status code will be set only for APM transaction + // to allow other middleware to choose a different response code + if resp.StatusCode == 0 { + resp.StatusCode = http.StatusInternalServerError + } + } else if resp.StatusCode == 0 { + w.WriteHeader(http.StatusInternalServerError) + } + h.recovery(w, req, resp, body, tx, v) + } + SetTransactionContext(tx, req, resp, body) + body.Discard() + }() + h.handler.ServeHTTP(w, req) + if resp.StatusCode == 0 { + resp.StatusCode = http.StatusOK + } +} + +// StartTransaction returns a new Transaction with name, +// created with tracer, and taking trace context from req. +// +// If the transaction is not ignored, the request will be +// returned with the transaction added to its context. +func StartTransaction(tracer *apm.Tracer, name string, req *http.Request) (*apm.Transaction, *http.Request) { + traceContext, ok := getRequestTraceparent(req, ElasticTraceparentHeader) + if !ok { + traceContext, ok = getRequestTraceparent(req, W3CTraceparentHeader) + } + if ok { + traceContext.State, _ = ParseTracestateHeader(req.Header[TracestateHeader]...) 
+ } + tx := tracer.StartTransactionOptions(name, "request", apm.TransactionOptions{TraceContext: traceContext}) + ctx := apm.ContextWithTransaction(req.Context(), tx) + req = RequestWithContext(ctx, req) + return tx, req +} + +func getRequestTraceparent(req *http.Request, header string) (apm.TraceContext, bool) { + if values := req.Header[header]; len(values) == 1 && values[0] != "" { + if c, err := ParseTraceparentHeader(values[0]); err == nil { + return c, true + } + } + return apm.TraceContext{}, false +} + +// SetTransactionContext sets tx.Result and, if the transaction is being +// sampled, sets tx.Context with information from req, resp, and body. +func SetTransactionContext(tx *apm.Transaction, req *http.Request, resp *Response, body *apm.BodyCapturer) { + tx.Result = StatusCodeResult(resp.StatusCode) + if !tx.Sampled() { + return + } + SetContext(&tx.Context, req, resp, body) +} + +// SetContext sets the context for a transaction or error using information +// from req, resp, and body. +func SetContext(ctx *apm.Context, req *http.Request, resp *Response, body *apm.BodyCapturer) { + ctx.SetHTTPRequest(req) + ctx.SetHTTPRequestBody(body) + ctx.SetHTTPStatusCode(resp.StatusCode) + ctx.SetHTTPResponseHeaders(resp.Headers) +} + +// WrapResponseWriter wraps an http.ResponseWriter and returns the wrapped +// value along with a *Response which will be filled in when the handler +// is called. The *Response value must not be inspected until after the +// request has been handled, to avoid data races. If neither of the +// ResponseWriter's Write or WriteHeader methods are called, then the +// response's StatusCode field will be zero. +// +// The returned http.ResponseWriter implements http.Pusher and http.Hijacker +// if and only if the provided http.ResponseWriter does. 
+func WrapResponseWriter(w http.ResponseWriter) (http.ResponseWriter, *Response) { + rw := responseWriter{ + ResponseWriter: w, + resp: Response{ + Headers: w.Header(), + }, + } + h, _ := w.(http.Hijacker) + p, _ := w.(http.Pusher) + switch { + case h != nil && p != nil: + rwhp := &responseWriterHijackerPusher{ + responseWriter: rw, + Hijacker: h, + Pusher: p, + } + return rwhp, &rwhp.resp + case h != nil: + rwh := &responseWriterHijacker{ + responseWriter: rw, + Hijacker: h, + } + return rwh, &rwh.resp + case p != nil: + rwp := &responseWriterPusher{ + responseWriter: rw, + Pusher: p, + } + return rwp, &rwp.resp + } + return &rw, &rw.resp +} + +// Response records details of the HTTP response. +type Response struct { + // StatusCode records the HTTP status code set via WriteHeader. + StatusCode int + + // Headers holds the headers set in the ResponseWriter. + Headers http.Header +} + +type responseWriter struct { + http.ResponseWriter + resp Response +} + +// WriteHeader sets w.resp.StatusCode and calls through to the embedded +// ResponseWriter. +func (w *responseWriter) WriteHeader(statusCode int) { + w.ResponseWriter.WriteHeader(statusCode) + w.resp.StatusCode = statusCode +} + +// Write calls through to the embedded ResponseWriter, setting +// w.resp.StatusCode to http.StatusOK if WriteHeader has not already +// been called. +func (w *responseWriter) Write(data []byte) (int, error) { + n, err := w.ResponseWriter.Write(data) + if w.resp.StatusCode == 0 { + w.resp.StatusCode = http.StatusOK + } + return n, err +} + +// CloseNotify returns w.closeNotify() if w.closeNotify is non-nil, +// otherwise it returns nil. +func (w *responseWriter) CloseNotify() <-chan bool { + if closeNotifier, ok := w.ResponseWriter.(http.CloseNotifier); ok { + return closeNotifier.CloseNotify() + } + return nil +} + +// Flush calls w.flush() if w.flush is non-nil, otherwise +// it does nothing. 
+func (w *responseWriter) Flush() { + if flusher, ok := w.ResponseWriter.(http.Flusher); ok { + flusher.Flush() + } +} + +type responseWriterHijacker struct { + responseWriter + http.Hijacker +} + +type responseWriterPusher struct { + responseWriter + http.Pusher +} + +type responseWriterHijackerPusher struct { + responseWriter + http.Hijacker + http.Pusher +} + +// ServerOption sets options for tracing server requests. +type ServerOption func(*handler) + +// WithTracer returns a ServerOption which sets t as the tracer +// to use for tracing server requests. +func WithTracer(t *apm.Tracer) ServerOption { + if t == nil { + panic("t == nil") + } + return func(h *handler) { + h.tracer = t + } +} + +// WithRecovery returns a ServerOption which sets r as the recovery +// function to use for tracing server requests. +func WithRecovery(r RecoveryFunc) ServerOption { + if r == nil { + panic("r == nil") + } + return func(h *handler) { + h.recovery = r + } +} + +// WithPanicPropagation returns a ServerOption which enable panic propagation. +// Any panic will be recovered and recorded as an error in a transaction, then +// panic will be caused again. +func WithPanicPropagation() ServerOption { + return func(h *handler) { + h.panicPropagation = true + } +} + +// RequestNameFunc is the type of a function for use in +// WithServerRequestName. +type RequestNameFunc func(*http.Request) string + +// WithServerRequestName returns a ServerOption which sets r as the function +// to use to obtain the transaction name for the given server request. +func WithServerRequestName(r RequestNameFunc) ServerOption { + if r == nil { + panic("r == nil") + } + return func(h *handler) { + h.requestName = r + } +} + +// RequestIgnorerFunc is the type of a function for use in +// WithServerRequestIgnorer. 
+type RequestIgnorerFunc func(*http.Request) bool + +// WithServerRequestIgnorer returns a ServerOption which sets r as the +// function to use to determine whether or not a server request should +// be ignored. If r is nil, all requests will be reported. +func WithServerRequestIgnorer(r RequestIgnorerFunc) ServerOption { + if r == nil { + r = IgnoreNone + } + return func(h *handler) { + h.requestIgnorer = r + } +} + +// RequestWithContext is equivalent to req.WithContext, except that the URL +// pointer is copied, rather than the contents. +func RequestWithContext(ctx context.Context, req *http.Request) *http.Request { + url := req.URL + req.URL = nil + reqCopy := req.WithContext(ctx) + reqCopy.URL = url + req.URL = url + return reqCopy +} diff --git a/vendor/go.elastic.co/apm/module/apmhttp/ignorer.go b/vendor/go.elastic.co/apm/module/apmhttp/ignorer.go new file mode 100644 index 00000000000..6ec56dd50f3 --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmhttp/ignorer.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package apmhttp + +import ( + "net/http" + "regexp" + "sync" + + "go.elastic.co/apm/internal/configutil" + "go.elastic.co/apm/internal/wildcard" +) + +const ( + envIgnoreURLs = "ELASTIC_APM_IGNORE_URLS" +) + +var ( + defaultServerRequestIgnorerOnce sync.Once + defaultServerRequestIgnorer RequestIgnorerFunc = IgnoreNone +) + +// DefaultServerRequestIgnorer returns the default RequestIgnorer to use in +// handlers. If ELASTIC_APM_IGNORE_URLS is set, it will be treated as a +// comma-separated list of wildcard patterns; requests that match any of the +// patterns will be ignored. +func DefaultServerRequestIgnorer() RequestIgnorerFunc { + defaultServerRequestIgnorerOnce.Do(func() { + matchers := configutil.ParseWildcardPatternsEnv(envIgnoreURLs, nil) + if len(matchers) != 0 { + defaultServerRequestIgnorer = NewWildcardPatternsRequestIgnorer(matchers) + } + }) + return defaultServerRequestIgnorer +} + +// NewRegexpRequestIgnorer returns a RequestIgnorerFunc which matches requests' +// URLs against re. Note that for server requests, typically only Path and +// possibly RawQuery will be set, so the regular expression should take this +// into account. +func NewRegexpRequestIgnorer(re *regexp.Regexp) RequestIgnorerFunc { + if re == nil { + panic("re == nil") + } + return func(r *http.Request) bool { + return re.MatchString(r.URL.String()) + } +} + +// NewWildcardPatternsRequestIgnorer returns a RequestIgnorerFunc which matches +// requests' URLs against any of the matchers. Note that for server requests, +// typically only Path and possibly RawQuery will be set, so the wildcard patterns +// should take this into account. +func NewWildcardPatternsRequestIgnorer(matchers wildcard.Matchers) RequestIgnorerFunc { + if len(matchers) == 0 { + panic("len(matchers) == 0") + } + return func(r *http.Request) bool { + return matchers.MatchAny(r.URL.String()) + } +} + +// IgnoreNone is a RequestIgnorerFunc which ignores no requests. 
+func IgnoreNone(*http.Request) bool { + return false +} diff --git a/vendor/go.elastic.co/apm/module/apmhttp/recovery.go b/vendor/go.elastic.co/apm/module/apmhttp/recovery.go new file mode 100644 index 00000000000..988769c1456 --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmhttp/recovery.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmhttp + +import ( + "net/http" + + "go.elastic.co/apm" +) + +// RecoveryFunc is the type of a function for use in WithRecovery. +type RecoveryFunc func( + w http.ResponseWriter, + req *http.Request, + resp *Response, + body *apm.BodyCapturer, + tx *apm.Transaction, + recovered interface{}, +) + +// NewTraceRecovery returns a RecoveryFunc for use in WithRecovery. +// +// The returned RecoveryFunc will report recovered error to Elastic APM +// using the given Tracer, or apm.DefaultTracer if t is nil. The +// error will be linked to the given transaction. +// +// If headers have not already been written, a 500 response will be sent. 
+func NewTraceRecovery(t *apm.Tracer) RecoveryFunc { + if t == nil { + t = apm.DefaultTracer + } + return func( + w http.ResponseWriter, + req *http.Request, + resp *Response, + body *apm.BodyCapturer, + tx *apm.Transaction, + recovered interface{}, + ) { + e := t.Recovered(recovered) + e.SetTransaction(tx) + SetContext(&e.Context, req, resp, body) + e.Send() + } +} diff --git a/vendor/go.elastic.co/apm/module/apmhttp/requestname.go b/vendor/go.elastic.co/apm/module/apmhttp/requestname.go new file mode 100644 index 00000000000..877aac15306 --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmhttp/requestname.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build go1.10 + +package apmhttp + +import ( + "net/http" + "strings" +) + +// UnknownRouteRequestName returns the transaction name for the server request, req, +// when the route could not be determined. +func UnknownRouteRequestName(req *http.Request) string { + const suffix = " unknown route" + var b strings.Builder + b.Grow(len(req.Method) + len(suffix)) + b.WriteString(req.Method) + b.WriteString(suffix) + return b.String() +} + +// ServerRequestName returns the transaction name for the server request, req. 
+func ServerRequestName(req *http.Request) string { + var b strings.Builder + b.Grow(len(req.Method) + len(req.URL.Path) + 1) + b.WriteString(req.Method) + b.WriteByte(' ') + b.WriteString(req.URL.Path) + return b.String() +} + +// ClientRequestName returns the span name for the client request, req. +func ClientRequestName(req *http.Request) string { + var b strings.Builder + b.Grow(len(req.Method) + len(req.URL.Host) + 1) + b.WriteString(req.Method) + b.WriteByte(' ') + b.WriteString(req.URL.Host) + return b.String() +} diff --git a/vendor/go.elastic.co/apm/module/apmhttp/requestname_go19.go b/vendor/go.elastic.co/apm/module/apmhttp/requestname_go19.go new file mode 100644 index 00000000000..2a84ec75955 --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmhttp/requestname_go19.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build !go1.10 + +package apmhttp + +import "net/http" + +// UnknownRouteRequestName returns the transaction name for the server request, req, +// when the route could not be determined. +func UnknownRouteRequestName(req *http.Request) string { + return req.Method + " unknown route" +} + +// ServerRequestName returns the transaction name for the server request, req. 
+func ServerRequestName(req *http.Request) string { + buf := make([]byte, len(req.Method)+len(req.URL.Path)+1) + n := copy(buf, req.Method) + buf[n] = ' ' + copy(buf[n+1:], req.URL.Path) + return string(buf) +} + +// ClientRequestName returns the span name for the client request, req. +func ClientRequestName(req *http.Request) string { + buf := make([]byte, len(req.Method)+len(req.URL.Host)+1) + n := copy(buf, req.Method) + buf[n] = ' ' + copy(buf[n+1:], req.URL.Host) + return string(buf) +} diff --git a/vendor/go.elastic.co/apm/module/apmhttp/traceheaders.go b/vendor/go.elastic.co/apm/module/apmhttp/traceheaders.go new file mode 100644 index 00000000000..8a00a70db30 --- /dev/null +++ b/vendor/go.elastic.co/apm/module/apmhttp/traceheaders.go @@ -0,0 +1,168 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apmhttp + +import ( + "encoding/hex" + "fmt" + "strings" + + "github.com/pkg/errors" + + "go.elastic.co/apm" +) + +const ( + // TraceparentHeader is the HTTP header for trace propagation. + // + // For backwards compatibility, this is currently an alias for + // for ElasticTraceparentHeader, but the more specific constants + // below should be preferred. 
In a future version this will be + // replaced by the standard W3C header. + TraceparentHeader = ElasticTraceparentHeader + + // ElasticTraceparentHeader is the legacy HTTP header for trace propagation, + // maintained for backwards compatibility with older agents. + ElasticTraceparentHeader = "Elastic-Apm-Traceparent" + + // W3CTraceparentHeader is the standard W3C Trace-Context HTTP + // header for trace propagation. + W3CTraceparentHeader = "Traceparent" + + // TracestateHeader is the standard W3C Trace-Context HTTP header + // for vendor-specific trace propagation. + TracestateHeader = "Tracestate" +) + +// FormatTraceparentHeader formats the given trace context as a +// traceparent header. +func FormatTraceparentHeader(c apm.TraceContext) string { + const version = 0 + return fmt.Sprintf("%02x-%032x-%016x-%02x", 0, c.Trace[:], c.Span[:], c.Options) +} + +// ParseTraceparentHeader parses the given header, which is expected to be in +// the W3C Trace-Context traceparent format according to W3C Editor's Draft 23 May 2018: +// https://w3c.github.io/trace-context/#traceparent-field +// +// Note that the returned TraceContext's Trace and Span fields are not necessarily +// valid. The caller must decide whether or not it wishes to disregard invalid +// trace/span IDs, and validate them as required using their provided Validate +// methods. +// +// The returned TraceContext's TraceState field will be the empty value. Use +// ParseTracestateHeader to parse that separately. +func ParseTraceparentHeader(h string) (apm.TraceContext, error) { + var out apm.TraceContext + if len(h) < 3 || h[2] != '-' { + return out, errors.Errorf("invalid traceparent header %q", h) + } + var version byte + if !strings.HasPrefix(h, "00") { + decoded, err := hex.DecodeString(h[:2]) + if err != nil { + return out, errors.Wrap(err, "error decoding traceparent header version") + } + version = decoded[0] + } + h = h[3:] + + switch version { + case 255: + // "Version 255 is invalid." 
+ return out, errors.Errorf("traceparent header version 255 is forbidden") + default: + // "If higher version is detected - implementation SHOULD try to parse it." + fallthrough + case 0: + // Version 00: + // + // version-format = trace-id "-" span-id "-" trace-options + // trace-id = 32HEXDIG + // span-id = 16HEXDIG + // trace-options = 2HEXDIG + const ( + traceIDEnd = 32 + spanIDStart = traceIDEnd + 1 + spanIDEnd = spanIDStart + 16 + traceOptionsStart = spanIDEnd + 1 + traceOptionsEnd = traceOptionsStart + 2 + ) + switch { + case len(h) < traceOptionsEnd, + h[traceIDEnd] != '-', + h[spanIDEnd] != '-', + version == 0 && len(h) != traceOptionsEnd, + version > 0 && len(h) > traceOptionsEnd && h[traceOptionsEnd] != '-': + return out, errors.Errorf("invalid version %d traceparent header %q", version, h) + } + if _, err := hex.Decode(out.Trace[:], []byte(h[:traceIDEnd])); err != nil { + return out, errors.Wrapf(err, "error decoding trace-id for version %d", version) + } + if err := out.Trace.Validate(); err != nil { + return out, errors.Wrap(err, "invalid trace-id") + } + if _, err := hex.Decode(out.Span[:], []byte(h[spanIDStart:spanIDEnd])); err != nil { + return out, errors.Wrapf(err, "error decoding span-id for version %d", version) + } + if err := out.Span.Validate(); err != nil { + return out, errors.Wrap(err, "invalid span-id") + } + var traceOptions [1]byte + if _, err := hex.Decode(traceOptions[:], []byte(h[traceOptionsStart:traceOptionsEnd])); err != nil { + return out, errors.Wrapf(err, "error decoding trace-options for version %d", version) + } + out.Options = apm.TraceOptions(traceOptions[0]) + return out, nil + } +} + +// ParseTracestateHeader parses the given header, which is expected to be in the +// W3C Trace-Context tracestate format according to W3C Editor's Draft 18 Nov 2019: +// https://w3c.github.io/trace-context/#tracestate-header +// +// Note that the returned TraceState is not necessarily valid. 
The caller must +// decide whether or not it wishes to disregard invalid tracestate entries, and +// validate them as required using their provided Validate methods. +// +// Multiple header values may be presented, in which case they will be treated as +// if they are concatenated together with commas. +func ParseTracestateHeader(h ...string) (apm.TraceState, error) { + var entries []apm.TraceStateEntry + for _, h := range h { + for { + h = strings.TrimSpace(h) + if h == "" { + break + } + kv := h + if comma := strings.IndexRune(h, ','); comma != -1 { + kv = strings.TrimSpace(h[:comma]) + h = h[comma+1:] + } else { + h = "" + } + equal := strings.IndexRune(kv, '=') + if equal == -1 { + return apm.TraceState{}, errors.New("missing '=' in tracestate entry") + } + entries = append(entries, apm.TraceStateEntry{Key: kv[:equal], Value: kv[equal+1:]}) + } + } + return apm.NewTraceState(entries...), nil +} diff --git a/vendor/go.elastic.co/apm/profiling.go b/vendor/go.elastic.co/apm/profiling.go new file mode 100644 index 00000000000..7cc3fe9affe --- /dev/null +++ b/vendor/go.elastic.co/apm/profiling.go @@ -0,0 +1,164 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package apm + +import ( + "bytes" + "context" + "io" + "runtime/pprof" + "time" + + "github.com/pkg/errors" +) + +type profilingState struct { + profileType string + profileStart func(io.Writer) error + profileStop func() + sender profileSender + + interval time.Duration + duration time.Duration // not relevant to all profiles + + timer *time.Timer + timerStart time.Time + buf bytes.Buffer + finished chan struct{} +} + +// newCPUProfilingState calls newProfilingState with the +// profiler type set to "cpu", and using pprof.StartCPUProfile +// and pprof.StopCPUProfile. +func newCPUProfilingState(sender profileSender) *profilingState { + return newProfilingState("cpu", pprof.StartCPUProfile, pprof.StopCPUProfile, sender) +} + +// newHeapProfilingState calls newProfilingState with the +// profiler type set to "heap", and using pprof.Lookup("heap").WriteTo(writer, 0). +func newHeapProfilingState(sender profileSender) *profilingState { + return newLookupProfilingState("heap", sender) +} + +func newLookupProfilingState(name string, sender profileSender) *profilingState { + profileStart := func(w io.Writer) error { + profile := pprof.Lookup(name) + if profile == nil { + return errors.Errorf("no profile called %q", name) + } + return profile.WriteTo(w, 0) + } + return newProfilingState("heap", profileStart, func() {}, sender) +} + +// newProfilingState returns a new profilingState, +// with its timer stopped. The timer may be started +// by calling profilingState.updateConfig. 
+func newProfilingState( + profileType string, + profileStart func(io.Writer) error, + profileStop func(), + sender profileSender, +) *profilingState { + state := &profilingState{ + profileType: profileType, + profileStart: profileStart, + profileStop: profileStop, + sender: sender, + timer: time.NewTimer(0), + finished: make(chan struct{}, 1), + } + if !state.timer.Stop() { + <-state.timer.C + } + return state +} + +func (state *profilingState) updateConfig(interval, duration time.Duration) { + if state.sender == nil { + // No profile sender, no point in starting a timer. + return + } + state.duration = duration + if state.interval == interval { + return + } + if state.timerStart.IsZero() { + state.interval = interval + state.resetTimer() + } + // TODO(axw) handle extending/cutting short running timers once + // it is possible to dynamically control profiling configuration. +} + +func (state *profilingState) resetTimer() { + if state.interval != 0 { + state.timer.Reset(state.interval) + state.timerStart = time.Now() + } else { + state.timerStart = time.Time{} + } +} + +// start spawns a goroutine that will capture a profile, send it using state.sender, +// and finally signal state.finished. +// +// start will return immediately after spawning the goroutine. +func (state *profilingState) start(ctx context.Context, logger Logger, metadata io.Reader) { + // The state.duration field may be updated after the goroutine starts, + // by the caller, so it must be read outside the goroutine. 
+ duration := state.duration + go func() { + defer func() { state.finished <- struct{}{} }() + if err := state.profile(ctx, duration); err != nil { + if logger != nil && ctx.Err() == nil { + logger.Errorf("%s", err) + } + return + } + // TODO(axw) backoff like SendStream requests + if err := state.sender.SendProfile(ctx, metadata, &state.buf); err != nil { + if logger != nil && ctx.Err() == nil { + logger.Errorf("failed to send %s profile: %s", state.profileType, err) + } + } + }() +} + +func (state *profilingState) profile(ctx context.Context, duration time.Duration) error { + state.buf.Reset() + if err := state.profileStart(&state.buf); err != nil { + return errors.Wrapf(err, "failed to start %s profile", state.profileType) + } + defer state.profileStop() + + if duration > 0 { + timer := time.NewTimer(duration) + defer timer.Stop() + select { + case <-ctx.Done(): + return ctx.Err() + case <-timer.C: + } + } + return nil +} + +type profileSender interface { + SendProfile(ctx context.Context, metadata io.Reader, profile ...io.Reader) error +} diff --git a/vendor/go.elastic.co/apm/sampler.go b/vendor/go.elastic.co/apm/sampler.go new file mode 100644 index 00000000000..3cf4591c646 --- /dev/null +++ b/vendor/go.elastic.co/apm/sampler.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +import ( + "encoding/binary" + "math" + "math/big" + + "github.com/pkg/errors" +) + +// Sampler provides a means of sampling transactions. +type Sampler interface { + // Sample indicates whether or not a transaction + // should be sampled. This method will be invoked + // by calls to Tracer.StartTransaction for the root + // of a trace, so it must be goroutine-safe, and + // should avoid synchronization as far as possible. + Sample(TraceContext) bool +} + +// NewRatioSampler returns a new Sampler with the given ratio +// +// A ratio of 1.0 samples 100% of transactions, a ratio of 0.5 +// samples ~50%, and so on. If the ratio provided does not lie +// within the range [0,1.0], NewRatioSampler will panic. +// +// The returned Sampler bases its decision on the value of the +// transaction ID, so there is no synchronization involved. +func NewRatioSampler(r float64) Sampler { + if r < 0 || r > 1.0 { + panic(errors.Errorf("ratio %v out of range [0,1.0]", r)) + } + var x big.Float + x.SetUint64(math.MaxUint64) + x.Mul(&x, big.NewFloat(r)) + ceil, _ := x.Uint64() + return ratioSampler{ceil} +} + +type ratioSampler struct { + ceil uint64 +} + +// Sample samples the transaction according to the configured +// ratio and pseudo-random source. +func (s ratioSampler) Sample(c TraceContext) bool { + v := binary.BigEndian.Uint64(c.Span[:]) + return v > 0 && v-1 < s.ceil +} diff --git a/vendor/go.elastic.co/apm/sanitizer.go b/vendor/go.elastic.co/apm/sanitizer.go new file mode 100644 index 00000000000..7f9014840ec --- /dev/null +++ b/vendor/go.elastic.co/apm/sanitizer.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +import ( + "go.elastic.co/apm/internal/wildcard" + "go.elastic.co/apm/model" +) + +const redacted = "[REDACTED]" + +// sanitizeRequest sanitizes HTTP request data, redacting the +// values of cookies, headers and forms whose corresponding keys +// match any of the given wildcard patterns. +func sanitizeRequest(r *model.Request, matchers wildcard.Matchers) { + for _, c := range r.Cookies { + if !matchers.MatchAny(c.Name) { + continue + } + c.Value = redacted + } + sanitizeHeaders(r.Headers, matchers) + if r.Body != nil && r.Body.Form != nil { + for key, values := range r.Body.Form { + if !matchers.MatchAny(key) { + continue + } + for i := range values { + values[i] = redacted + } + } + } +} + +// sanitizeResponse sanitizes HTTP response data, redacting +// the values of response headers whose corresponding keys +// match any of the given wildcard patterns. 
+func sanitizeResponse(r *model.Response, matchers wildcard.Matchers) { + sanitizeHeaders(r.Headers, matchers) +} + +func sanitizeHeaders(headers model.Headers, matchers wildcard.Matchers) { + for i := range headers { + h := &headers[i] + if !matchers.MatchAny(h.Key) || len(h.Values) == 0 { + continue + } + h.Values = h.Values[:1] + h.Values[0] = redacted + } +} diff --git a/vendor/go.elastic.co/apm/span.go b/vendor/go.elastic.co/apm/span.go new file mode 100644 index 00000000000..85b244c1db9 --- /dev/null +++ b/vendor/go.elastic.co/apm/span.go @@ -0,0 +1,415 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +import ( + cryptorand "crypto/rand" + "encoding/binary" + "strings" + "sync" + "time" + + "go.elastic.co/apm/stacktrace" +) + +// droppedSpanDataPool holds *SpanData which are used when the span is created +// for a nil or non-sampled trace context, without a transaction reference. +// +// Spans started with a non-nil transaction, even if it is non-sampled, are +// always created with the transaction's tracer span pool. 
+var droppedSpanDataPool sync.Pool + +// StartSpan starts and returns a new Span within the transaction, +// with the specified name, type, and optional parent span, and +// with the start time set to the current time. +// +// StartSpan always returns a non-nil Span, with a non-nil SpanData +// field. Its End method must be called when the span completes. +// +// If the span type contains two dots, they are assumed to separate +// the span type, subtype, and action; a single dot separates span +// type and subtype, and the action will not be set. +// +// StartSpan is equivalent to calling StartSpanOptions with +// SpanOptions.Parent set to the trace context of parent if +// parent is non-nil. +func (tx *Transaction) StartSpan(name, spanType string, parent *Span) *Span { + return tx.StartSpanOptions(name, spanType, SpanOptions{ + parent: parent, + }) +} + +// StartSpanOptions starts and returns a new Span within the transaction, +// with the specified name, type, and options. +// +// StartSpan always returns a non-nil Span. Its End method must be called +// when the span completes. +// +// If the span type contains two dots, they are assumed to separate the +// span type, subtype, and action; a single dot separates span type and +// subtype, and the action will not be set. +func (tx *Transaction) StartSpanOptions(name, spanType string, opts SpanOptions) *Span { + if tx == nil { + return newDroppedSpan() + } + + if opts.Parent == (TraceContext{}) { + if opts.parent != nil { + opts.Parent = opts.parent.TraceContext() + } else { + opts.Parent = tx.traceContext + } + } + transactionID := tx.traceContext.Span + + // Prevent tx from being ended while we're starting a span. 
+ tx.mu.RLock() + defer tx.mu.RUnlock() + if tx.ended() { + return tx.tracer.StartSpan(name, spanType, transactionID, opts) + } + + // Calculate the span time relative to the transaction timestamp so + // that wall-clock adjustments occurring after the transaction start + // don't affect the span timestamp. + if opts.Start.IsZero() { + opts.Start = tx.timestamp.Add(time.Since(tx.timestamp)) + } else { + opts.Start = tx.timestamp.Add(opts.Start.Sub(tx.timestamp)) + } + span := tx.tracer.startSpan(name, spanType, transactionID, opts) + span.tx = tx + span.parent = opts.parent + + // Guard access to spansCreated, spansDropped, rand, and childrenTimer. + tx.TransactionData.mu.Lock() + defer tx.TransactionData.mu.Unlock() + if !span.traceContext.Options.Recorded() { + span.tracer = nil // span is dropped + } else if tx.maxSpans >= 0 && tx.spansCreated >= tx.maxSpans { + span.tracer = nil // span is dropped + tx.spansDropped++ + } else { + if opts.SpanID.Validate() == nil { + span.traceContext.Span = opts.SpanID + } else { + binary.LittleEndian.PutUint64(span.traceContext.Span[:], tx.rand.Uint64()) + } + span.stackFramesMinDuration = tx.spanFramesMinDuration + span.stackTraceLimit = tx.stackTraceLimit + tx.spansCreated++ + } + + if tx.breakdownMetricsEnabled { + if span.parent != nil { + span.parent.mu.Lock() + defer span.parent.mu.Unlock() + if !span.parent.ended() { + span.parent.childrenTimer.childStarted(span.timestamp) + } + } else { + tx.childrenTimer.childStarted(span.timestamp) + } + } + return span +} + +// StartSpan returns a new Span with the specified name, type, transaction ID, +// and options. The parent transaction context and transaction IDs must have +// valid, non-zero values, or else the span will be dropped. +// +// In most cases, you should use Transaction.StartSpan or Transaction.StartSpanOptions. +// This method is provided for corner-cases, such as starting a span after the +// containing transaction's End method has been called. 
Spans created in this +// way will not have the "max spans" configuration applied, nor will they be +// considered in any transaction's span count. +func (t *Tracer) StartSpan(name, spanType string, transactionID SpanID, opts SpanOptions) *Span { + if opts.Parent.Trace.Validate() != nil || opts.Parent.Span.Validate() != nil || transactionID.Validate() != nil { + return newDroppedSpan() + } + if !opts.Parent.Options.Recorded() { + return newDroppedSpan() + } + var spanID SpanID + if opts.SpanID.Validate() == nil { + spanID = opts.SpanID + } else { + if _, err := cryptorand.Read(spanID[:]); err != nil { + return newDroppedSpan() + } + } + if opts.Start.IsZero() { + opts.Start = time.Now() + } + span := t.startSpan(name, spanType, transactionID, opts) + span.traceContext.Span = spanID + + instrumentationConfig := t.instrumentationConfig() + span.stackFramesMinDuration = instrumentationConfig.spanFramesMinDuration + span.stackTraceLimit = instrumentationConfig.stackTraceLimit + + return span +} + +// SpanOptions holds options for Transaction.StartSpanOptions and Tracer.StartSpan. +type SpanOptions struct { + // Parent, if non-zero, holds the trace context of the parent span. + Parent TraceContext + + // SpanID holds the ID to assign to the span. If this is zero, a new ID + // will be generated and used instead. + SpanID SpanID + + // parent, if non-nil, holds the parent span. + // + // This is only used if Parent is zero, and is only available to internal + // callers of Transaction.StartSpanOptions. + parent *Span + + // Start is the start time of the span. If this has the zero value, + // time.Now() will be used instead. + // + // When a span is created using Transaction.StartSpanOptions, the + // span timestamp is internally calculated relative to the transaction + // timestamp. + // + // When Tracer.StartSpan is used, this timestamp should be pre-calculated + // as relative from the transaction start time, i.e. 
by calculating the + // time elapsed since the transaction started, and adding that to the + // transaction timestamp. Calculating the timstamp in this way will ensure + // monotonicity of events within a transaction. + Start time.Time +} + +func (t *Tracer) startSpan(name, spanType string, transactionID SpanID, opts SpanOptions) *Span { + sd, _ := t.spanDataPool.Get().(*SpanData) + if sd == nil { + sd = &SpanData{Duration: -1} + } + span := &Span{tracer: t, SpanData: sd} + span.Name = name + span.traceContext = opts.Parent + span.parentID = opts.Parent.Span + span.transactionID = transactionID + span.timestamp = opts.Start + span.Type = spanType + if dot := strings.IndexRune(spanType, '.'); dot != -1 { + span.Type = spanType[:dot] + span.Subtype = spanType[dot+1:] + if dot := strings.IndexRune(span.Subtype, '.'); dot != -1 { + span.Subtype, span.Action = span.Subtype[:dot], span.Subtype[dot+1:] + } + } + return span +} + +// newDropped returns a new Span with a non-nil SpanData. +func newDroppedSpan() *Span { + span, _ := droppedSpanDataPool.Get().(*Span) + if span == nil { + span = &Span{SpanData: &SpanData{}} + } + return span +} + +// Span describes an operation within a transaction. +type Span struct { + tracer *Tracer // nil if span is dropped + tx *Transaction + parent *Span + traceContext TraceContext + transactionID SpanID + + mu sync.RWMutex + + // SpanData holds the span data. This field is set to nil when + // the span's End method is called. + *SpanData +} + +// TraceContext returns the span's TraceContext. +func (s *Span) TraceContext() TraceContext { + if s == nil { + return TraceContext{} + } + return s.traceContext +} + +// SetStacktrace sets the stacktrace for the span, +// skipping the first skip number of frames, +// excluding the SetStacktrace function. 
+func (s *Span) SetStacktrace(skip int) { + if s == nil || s.dropped() { + return + } + s.mu.RLock() + defer s.mu.RUnlock() + if s.ended() { + return + } + s.SpanData.setStacktrace(skip + 1) +} + +// Dropped indicates whether or not the span is dropped, meaning it will not +// be included in any transaction. Spans are dropped by Transaction.StartSpan +// if the transaction is nil, non-sampled, or the transaction's max spans +// limit has been reached. +// +// Dropped may be used to avoid any expensive computation required to set +// the span's context. +func (s *Span) Dropped() bool { + return s == nil || s.dropped() +} + +func (s *Span) dropped() bool { + return s.tracer == nil +} + +// End marks the s as being complete; s must not be used after this. +// +// If s.Duration has not been set, End will set it to the elapsed time +// since the span's start time. +func (s *Span) End() { + s.mu.Lock() + defer s.mu.Unlock() + if s.ended() { + return + } + if s.Duration < 0 { + s.Duration = time.Since(s.timestamp) + } + if s.dropped() { + if s.tx == nil { + droppedSpanDataPool.Put(s.SpanData) + } else { + s.reportSelfTime() + s.reset(s.tx.tracer) + } + s.SpanData = nil + return + } + if len(s.stacktrace) == 0 && s.Duration >= s.stackFramesMinDuration { + s.setStacktrace(1) + } + if s.tx != nil { + s.reportSelfTime() + } + s.enqueue() + s.SpanData = nil +} + +// reportSelfTime reports the span's self-time to its transaction, and informs +// the parent that it has ended in order for the parent to later calculate its +// own self-time. +// +// This must only be called from Span.End, with s.mu.Lock held for writing and +// s.Duration set. +func (s *Span) reportSelfTime() { + endTime := s.timestamp.Add(s.Duration) + + // TODO(axw) try to find a way to not lock the transaction when + // ending every span. We already lock them when starting spans. 
+ s.tx.mu.RLock() + defer s.tx.mu.RUnlock() + if s.tx.ended() || !s.tx.breakdownMetricsEnabled { + return + } + + s.tx.TransactionData.mu.Lock() + defer s.tx.TransactionData.mu.Unlock() + if s.parent != nil { + s.parent.mu.Lock() + if !s.parent.ended() { + s.parent.childrenTimer.childEnded(endTime) + } + s.parent.mu.Unlock() + } else { + s.tx.childrenTimer.childEnded(endTime) + } + s.tx.spanTimings.add(s.Type, s.Subtype, s.Duration-s.childrenTimer.finalDuration(endTime)) +} + +func (s *Span) enqueue() { + event := tracerEvent{eventType: spanEvent} + event.span.Span = s + event.span.SpanData = s.SpanData + select { + case s.tracer.events <- event: + default: + // Enqueuing a span should never block. + s.tracer.statsMu.Lock() + s.tracer.stats.SpansDropped++ + s.tracer.statsMu.Unlock() + s.reset(s.tracer) + } +} + +func (s *Span) ended() bool { + return s.SpanData == nil +} + +// SpanData holds the details for a span, and is embedded inside Span. +// When a span is ended or discarded, its SpanData field will be set +// to nil. +type SpanData struct { + parentID SpanID + stackFramesMinDuration time.Duration + stackTraceLimit int + timestamp time.Time + childrenTimer childrenTimer + + // Name holds the span name, initialized with the value passed to StartSpan. + Name string + + // Type holds the overarching span type, such as "db", and will be initialized + // with the value passed to StartSpan. + Type string + + // Subtype holds the span subtype, such as "mysql". This will initially be empty, + // and can be set after starting the span. + Subtype string + + // Action holds the span action, such as "query". This will initially be empty, + // and can be set after starting the span. + Action string + + // Duration holds the span duration, initialized to -1. + // + // If you do not update Duration, calling Span.End will calculate the + // duration based on the elapsed time since the span's start time. 
+ Duration time.Duration + + // Context describes the context in which span occurs. + Context SpanContext + + stacktrace []stacktrace.Frame +} + +func (s *SpanData) setStacktrace(skip int) { + s.stacktrace = stacktrace.AppendStacktrace(s.stacktrace[:0], skip+1, s.stackTraceLimit) +} + +func (s *SpanData) reset(tracer *Tracer) { + *s = SpanData{ + Context: s.Context, + Duration: -1, + stacktrace: s.stacktrace[:0], + } + s.Context.reset() + tracer.spanDataPool.Put(s) +} diff --git a/vendor/go.elastic.co/apm/spancontext.go b/vendor/go.elastic.co/apm/spancontext.go new file mode 100644 index 00000000000..180fe1ddc34 --- /dev/null +++ b/vendor/go.elastic.co/apm/spancontext.go @@ -0,0 +1,193 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "go.elastic.co/apm/internal/apmhttputil" + "go.elastic.co/apm/model" +) + +// SpanContext provides methods for setting span context. 
+type SpanContext struct { + model model.SpanContext + destination model.DestinationSpanContext + destinationService model.DestinationServiceSpanContext + databaseRowsAffected int64 + database model.DatabaseSpanContext + http model.HTTPSpanContext +} + +// DatabaseSpanContext holds database span context. +type DatabaseSpanContext struct { + // Instance holds the database instance name. + Instance string + + // Statement holds the statement executed in the span, + // e.g. "SELECT * FROM foo". + Statement string + + // Type holds the database type, e.g. "sql". + Type string + + // User holds the username used for database access. + User string +} + +// DestinationServiceSpanContext holds destination service span span. +type DestinationServiceSpanContext struct { + // Name holds a name for the destination service, which may be used + // for grouping and labeling in service maps. + Name string + + // Resource holds an identifier for a destination service resource, + // such as a message queue. + Resource string +} + +func (c *SpanContext) build() *model.SpanContext { + switch { + case len(c.model.Tags) != 0: + case c.model.Database != nil: + case c.model.HTTP != nil: + case c.model.Destination != nil: + default: + return nil + } + return &c.model +} + +func (c *SpanContext) reset() { + *c = SpanContext{ + model: model.SpanContext{ + Tags: c.model.Tags[:0], + }, + } +} + +// SetTag calls SetLabel(key, value). +// +// SetTag is deprecated, and will be removed in a future major version. +func (c *SpanContext) SetTag(key, value string) { + c.SetLabel(key, value) +} + +// SetLabel sets a label in the context. +// +// Invalid characters ('.', '*', and '"') in the key will be replaced with +// underscores. +// +// If the value is numerical or boolean, then it will be sent to the server +// as a JSON number or boolean; otherwise it will converted to a string, using +// `fmt.Sprint` if necessary. String values longer than 1024 characters will +// be truncated. 
+func (c *SpanContext) SetLabel(key string, value interface{}) { + // Note that we do not attempt to de-duplicate the keys. + // This is OK, since json.Unmarshal will always take the + // final instance. + c.model.Tags = append(c.model.Tags, model.IfaceMapItem{ + Key: cleanLabelKey(key), + Value: makeLabelValue(value), + }) +} + +// SetDatabase sets the span context for database-related operations. +func (c *SpanContext) SetDatabase(db DatabaseSpanContext) { + c.database = model.DatabaseSpanContext{ + Instance: truncateString(db.Instance), + Statement: truncateLongString(db.Statement), + Type: truncateString(db.Type), + User: truncateString(db.User), + } + c.model.Database = &c.database +} + +// SetDatabaseRowsAffected records the number of rows affected by +// a database operation. +func (c *SpanContext) SetDatabaseRowsAffected(n int64) { + c.databaseRowsAffected = n + c.database.RowsAffected = &c.databaseRowsAffected +} + +// SetHTTPRequest sets the details of the HTTP request in the context. +// +// This function relates to client requests. If the request URL contains +// user info, it will be removed and excluded from the stored URL. +// +// SetHTTPRequest makes implicit calls to SetDestinationAddress and +// SetDestinationService, using details from req.URL. +func (c *SpanContext) SetHTTPRequest(req *http.Request) { + if req.URL == nil { + return + } + c.http.URL = req.URL + c.model.HTTP = &c.http + + addr, port := apmhttputil.DestinationAddr(req) + c.SetDestinationAddress(addr, port) + + destinationServiceURL := url.URL{Scheme: req.URL.Scheme, Host: req.URL.Host} + destinationServiceResource := destinationServiceURL.Host + if port != 0 && port == apmhttputil.SchemeDefaultPort(req.URL.Scheme) { + var hasDefaultPort bool + if n := len(destinationServiceURL.Host); n > 0 && destinationServiceURL.Host[n-1] != ']' { + if i := strings.LastIndexByte(destinationServiceURL.Host, ':'); i != -1 { + // Remove the default port from destination.service.name. 
+ destinationServiceURL.Host = destinationServiceURL.Host[:i] + hasDefaultPort = true + } + } + if !hasDefaultPort { + // Add the default port to destination.service.resource. + destinationServiceResource = fmt.Sprintf("%s:%d", destinationServiceResource, port) + } + } + c.SetDestinationService(DestinationServiceSpanContext{ + Name: destinationServiceURL.String(), + Resource: destinationServiceResource, + }) +} + +// SetHTTPStatusCode records the HTTP response status code. +func (c *SpanContext) SetHTTPStatusCode(statusCode int) { + c.http.StatusCode = statusCode + c.model.HTTP = &c.http +} + +// SetDestinationAddress sets the destination address and port in the context. +// +// SetDestinationAddress has no effect when called when an empty addr. +func (c *SpanContext) SetDestinationAddress(addr string, port int) { + if addr != "" { + c.destination.Address = truncateString(addr) + c.destination.Port = port + c.model.Destination = &c.destination + } +} + +// SetDestinationService sets the destination service info in the context. +func (c *SpanContext) SetDestinationService(service DestinationServiceSpanContext) { + c.destinationService.Name = truncateString(service.Name) + c.destinationService.Resource = truncateString(service.Resource) + c.destination.Service = &c.destinationService + c.model.Destination = &c.destination +} diff --git a/vendor/go.elastic.co/apm/stacktrace.go b/vendor/go.elastic.co/apm/stacktrace.go new file mode 100644 index 00000000000..1fe5cda6158 --- /dev/null +++ b/vendor/go.elastic.co/apm/stacktrace.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +import ( + "path/filepath" + + "go.elastic.co/apm/model" + "go.elastic.co/apm/stacktrace" +) + +func appendModelStacktraceFrames(out []model.StacktraceFrame, in []stacktrace.Frame) []model.StacktraceFrame { + for _, f := range in { + out = append(out, modelStacktraceFrame(f)) + } + return out +} + +func modelStacktraceFrame(in stacktrace.Frame) model.StacktraceFrame { + var abspath string + file := in.File + if file != "" { + if filepath.IsAbs(file) { + abspath = file + } + file = filepath.Base(file) + } + packagePath, function := stacktrace.SplitFunctionName(in.Function) + return model.StacktraceFrame{ + AbsolutePath: abspath, + File: file, + Line: in.Line, + Function: function, + Module: packagePath, + LibraryFrame: stacktrace.IsLibraryPackage(packagePath), + } +} diff --git a/vendor/go.elastic.co/apm/stacktrace/context.go b/vendor/go.elastic.co/apm/stacktrace/context.go new file mode 100644 index 00000000000..b9d292432d7 --- /dev/null +++ b/vendor/go.elastic.co/apm/stacktrace/context.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package stacktrace + +import ( + "bufio" + "net/http" + "os" + + "go.elastic.co/apm/model" +) + +// SetContext sets the source context for the given stack frames, +// with the specified number of pre- and post- lines. +func SetContext(setter ContextSetter, frames []model.StacktraceFrame, pre, post int) error { + for i := 0; i < len(frames); i++ { + if err := setter.SetContext(&frames[i], pre, post); err != nil { + return err + } + } + return nil +} + +// ContextSetter is an interface that can be used for setting the source +// context for a stack frame. +type ContextSetter interface { + // SetContext sets the source context for the given stack frame, + // with the specified number of pre- and post- lines. + SetContext(frame *model.StacktraceFrame, pre, post int) error +} + +// FileSystemContextSetter returns a ContextSetter that sets context +// by reading file contents from the provided http.FileSystem. 
+func FileSystemContextSetter(fs http.FileSystem) ContextSetter { + if fs == nil { + panic("fs is nil") + } + return &fileSystemContextSetter{fs} +} + +type fileSystemContextSetter struct { + http.FileSystem +} + +func (s *fileSystemContextSetter) SetContext(frame *model.StacktraceFrame, pre, post int) error { + if frame.Line <= 0 { + return nil + } + f, err := s.Open(frame.AbsolutePath) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + + var lineno int + var line string + preLines := make([]string, 0, pre) + postLines := make([]string, 0, post) + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + lineno++ + if lineno > frame.Line+post { + break + } + switch { + case lineno == frame.Line: + line = scanner.Text() + case lineno < frame.Line && lineno >= frame.Line-pre: + preLines = append(preLines, scanner.Text()) + case lineno > frame.Line && lineno <= frame.Line+post: + postLines = append(postLines, scanner.Text()) + } + } + if err := scanner.Err(); err != nil { + return err + } + frame.ContextLine = line + frame.PreContext = preLines + frame.PostContext = postLines + return nil +} diff --git a/vendor/go.elastic.co/apm/stacktrace/doc.go b/vendor/go.elastic.co/apm/stacktrace/doc.go new file mode 100644 index 00000000000..f8cffa455d8 --- /dev/null +++ b/vendor/go.elastic.co/apm/stacktrace/doc.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package stacktrace provides a simplified stack frame type, +// functions for obtaining stack frames, and related utilities. +package stacktrace diff --git a/vendor/go.elastic.co/apm/stacktrace/frame.go b/vendor/go.elastic.co/apm/stacktrace/frame.go new file mode 100644 index 00000000000..1c5053a2513 --- /dev/null +++ b/vendor/go.elastic.co/apm/stacktrace/frame.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package stacktrace + +// Frame describes a stack frame. +type Frame struct { + // File is the filename of the location of the stack frame. + // This may be either the absolute or base name of the file. + File string + + // Line is the 1-based line number of the location of the + // stack frame, or zero if unknown. 
+ Line int + + // Function is the name of the function name for this stack + // frame. This should be package-qualified, and may be split + // using stacktrace.SplitFunctionName. + Function string +} diff --git a/vendor/go.elastic.co/apm/stacktrace/generate_library.bash b/vendor/go.elastic.co/apm/stacktrace/generate_library.bash new file mode 100644 index 00000000000..06bff3cb6ce --- /dev/null +++ b/vendor/go.elastic.co/apm/stacktrace/generate_library.bash @@ -0,0 +1,77 @@ +#!/bin/bash + +set -e + +_PKGS=$(go list -f '{{printf "\t%q,\n" .ImportPath}}' "$@" | grep -v vendor/golang_org) + +cat > library.go < 0 && n <= 10 { + pc = make([]uintptr, n) + pc = pc[:runtime.Callers(skip+1, pc)] + } else { + // n is negative or > 10, allocate space for 10 + // and make repeated calls to runtime.Callers + // until we've got all the frames or reached n. + pc = make([]uintptr, 10) + m := 0 + for { + m += runtime.Callers(skip+m+1, pc[m:]) + if m < len(pc) || m == n { + pc = pc[:m] + break + } + // Extend pc's length, ensuring its length + // extends to its new capacity to minimise + // the number of calls to runtime.Callers. + pc = append(pc, 0) + for len(pc) < cap(pc) { + pc = append(pc, 0) + } + } + } + return AppendCallerFrames(frames, pc, n) +} + +// AppendCallerFrames appends to n frames for the PCs in callers, +// and returns the extended slice. If n is negative, all available +// frames will be added. Multiple frames may exist for the same +// caller/PC in the case of function call inlining. +// +// See RuntimeFrame for information on what details are included. 
+func AppendCallerFrames(frames []Frame, callers []uintptr, n int) []Frame { + if len(callers) == 0 { + return frames + } + runtimeFrames := runtime.CallersFrames(callers) + for i := 0; n < 0 || i < n; i++ { + runtimeFrame, more := runtimeFrames.Next() + frames = append(frames, RuntimeFrame(runtimeFrame)) + if !more { + break + } + } + return frames +} + +// RuntimeFrame returns a Frame based on the given runtime.Frame. +// +// The resulting Frame will have the file path, package-qualified +// function name, and line number set. The function name can be +// split using SplitFunctionName, and the absolute path of the +// file and its base name can be determined using standard filepath +// functions. +func RuntimeFrame(in runtime.Frame) Frame { + return Frame{ + File: in.File, + Function: in.Function, + Line: in.Line, + } +} + +// SplitFunctionName splits the function name as formatted in +// runtime.Frame.Function, and returns the package path and +// function name components. +func SplitFunctionName(in string) (packagePath, function string) { + function = in + if function == "" { + return "", "" + } + // The last part of a package path will always have "." + // encoded as "%2e", so we can pick off the package path + // by finding the last part of the package path, and then + // the proceeding ".". + // + // Unexported method names may contain the package path. + // In these cases, the method receiver will be enclosed + // in parentheses, so we can treat that as the start of + // the function name. 
+ sep := strings.Index(function, ".(") + if sep >= 0 { + packagePath = unescape(function[:sep]) + function = function[sep+1:] + } else { + offset := 0 + if sep := strings.LastIndex(function, "/"); sep >= 0 { + offset = sep + } + if sep := strings.IndexRune(function[offset+1:], '.'); sep >= 0 { + packagePath = unescape(function[:offset+1+sep]) + function = function[offset+1+sep+1:] + } + } + return packagePath, function +} + +func unescape(s string) string { + var n int + for i := 0; i < len(s); i++ { + if s[i] == '%' { + n++ + } + } + if n == 0 { + return s + } + bytes := make([]byte, 0, len(s)-2*n) + for i := 0; i < len(s); i++ { + b := s[i] + if b == '%' && i+2 < len(s) { + b = fromhex(s[i+1])<<4 | fromhex(s[i+2]) + i += 2 + } + bytes = append(bytes, b) + } + return string(bytes) +} + +func fromhex(b byte) byte { + if b >= 'a' { + return 10 + b - 'a' + } + return b - '0' +} diff --git a/vendor/go.elastic.co/apm/tracecontext.go b/vendor/go.elastic.co/apm/tracecontext.go new file mode 100644 index 00000000000..2983e85d6da --- /dev/null +++ b/vendor/go.elastic.co/apm/tracecontext.go @@ -0,0 +1,263 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package apm + +import ( + "bytes" + "encoding/hex" + "fmt" + "regexp" + "unicode" + + "github.com/pkg/errors" +) + +var ( + errZeroTraceID = errors.New("zero trace-id is invalid") + errZeroSpanID = errors.New("zero span-id is invalid") +) + +// tracestateKeyRegexp holds a regular expression used for validating +// tracestate keys according to the standard rules: +// +// key = lcalpha 0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) +// key = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) "@" lcalpha 0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) +// lcalpha = %x61-7A ; a-z +// +// nblkchr is used for defining valid runes for tracestate values. +var ( + tracestateKeyRegexp = regexp.MustCompile(`^[a-z](([a-z0-9_*/-]{0,255})|([a-z0-9_*/-]{0,240}@[a-z][a-z0-9_*/-]{0,13}))$`) + + nblkchr = &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x21, 0x2B, 1}, + {0x2D, 0x3C, 1}, + {0x3E, 0x7E, 1}, + }, + LatinOffset: 3, + } +) + +const ( + traceOptionsRecordedFlag = 0x01 +) + +// TraceContext holds trace context for an incoming or outgoing request. +type TraceContext struct { + // Trace identifies the trace forest. + Trace TraceID + + // Span identifies a span: the parent span if this context + // corresponds to an incoming request, or the current span + // if this is an outgoing request. + Span SpanID + + // Options holds the trace options propagated by the parent. + Options TraceOptions + + // State holds the trace state. + State TraceState +} + +// TraceID identifies a trace forest. +type TraceID [16]byte + +// Validate validates the trace ID. +// This will return non-nil for a zero trace ID. +func (id TraceID) Validate() error { + if id.isZero() { + return errZeroTraceID + } + return nil +} + +func (id TraceID) isZero() bool { + return id == (TraceID{}) +} + +// String returns id encoded as hex. 
+func (id TraceID) String() string { + text, _ := id.MarshalText() + return string(text) +} + +// MarshalText returns id encoded as hex, satisfying encoding.TextMarshaler. +func (id TraceID) MarshalText() ([]byte, error) { + text := make([]byte, hex.EncodedLen(len(id))) + hex.Encode(text, id[:]) + return text, nil +} + +// SpanID identifies a span within a trace. +type SpanID [8]byte + +// Validate validates the span ID. +// This will return non-nil for a zero span ID. +func (id SpanID) Validate() error { + if id.isZero() { + return errZeroSpanID + } + return nil +} + +func (id SpanID) isZero() bool { + return id == SpanID{} +} + +// String returns id encoded as hex. +func (id SpanID) String() string { + text, _ := id.MarshalText() + return string(text) +} + +// MarshalText returns id encoded as hex, satisfying encoding.TextMarshaler. +func (id SpanID) MarshalText() ([]byte, error) { + text := make([]byte, hex.EncodedLen(len(id))) + hex.Encode(text, id[:]) + return text, nil +} + +// TraceOptions describes the options for a trace. +type TraceOptions uint8 + +// Recorded reports whether or not the transaction/span may have been (or may be) recorded. +func (o TraceOptions) Recorded() bool { + return (o & traceOptionsRecordedFlag) == traceOptionsRecordedFlag +} + +// WithRecorded changes the "recorded" flag, and returns the new options +// without modifying the original value. +func (o TraceOptions) WithRecorded(recorded bool) TraceOptions { + if recorded { + return o | traceOptionsRecordedFlag + } + return o & (0xFF ^ traceOptionsRecordedFlag) +} + +// TraceState holds vendor-specific state for a trace. +type TraceState struct { + head *TraceStateEntry +} + +// NewTraceState returns a TraceState based on entries. 
+func NewTraceState(entries ...TraceStateEntry) TraceState { + out := TraceState{} + var last *TraceStateEntry + for _, e := range entries { + e := e // copy + if last == nil { + out.head = &e + } else { + last.next = &e + } + last = &e + } + return out +} + +// String returns s as a comma-separated list of key-value pairs. +func (s TraceState) String() string { + if s.head == nil { + return "" + } + var buf bytes.Buffer + s.head.writeBuf(&buf) + for e := s.head.next; e != nil; e = e.next { + buf.WriteByte(',') + e.writeBuf(&buf) + } + return buf.String() +} + +// Validate validates the trace state. +// +// This will return non-nil if any entries are invalid, +// if there are too many entries, or if an entry key is +// repeated. +func (s TraceState) Validate() error { + if s.head == nil { + return nil + } + recorded := make(map[string]int) + var i int + for e := s.head; e != nil; e = e.next { + if i == 32 { + return errors.New("tracestate contains more than the maximum allowed number of entries, 32") + } + if err := e.Validate(); err != nil { + return errors.Wrapf(err, "invalid tracestate entry at position %d", i) + } + if prev, ok := recorded[e.Key]; ok { + return fmt.Errorf("duplicate tracestate key %q at positions %d and %d", e.Key, prev, i) + } + recorded[e.Key] = i + i++ + } + return nil +} + +// TraceStateEntry holds a trace state entry: a key/value pair +// representing state for a vendor. +type TraceStateEntry struct { + next *TraceStateEntry + + // Key holds a vendor (and optionally, tenant) ID. + Key string + + // Value holds a string representing trace state. + Value string +} + +func (e *TraceStateEntry) writeBuf(buf *bytes.Buffer) { + buf.WriteString(e.Key) + buf.WriteByte('=') + buf.WriteString(e.Value) +} + +// Validate validates the trace state entry. +// +// This will return non-nil if either the key or value is invalid. 
+func (e *TraceStateEntry) Validate() error { + if !tracestateKeyRegexp.MatchString(e.Key) { + return fmt.Errorf("invalid key %q", e.Key) + } + if err := e.validateValue(); err != nil { + return errors.Wrapf(err, "invalid value for key %q", e.Key) + } + return nil +} + +func (e *TraceStateEntry) validateValue() error { + if e.Value == "" { + return errors.New("value is empty") + } + runes := []rune(e.Value) + n := len(runes) + if n > 256 { + return errors.Errorf("value contains %d characters, maximum allowed is 256", n) + } + if !unicode.In(runes[n-1], nblkchr) { + return errors.Errorf("value contains invalid character %q", runes[n-1]) + } + for _, r := range runes[:n-1] { + if r != 0x20 && !unicode.In(r, nblkchr) { + return errors.Errorf("value contains invalid character %q", r) + } + } + return nil +} diff --git a/vendor/go.elastic.co/apm/tracer.go b/vendor/go.elastic.co/apm/tracer.go new file mode 100644 index 00000000000..3170e2f226b --- /dev/null +++ b/vendor/go.elastic.co/apm/tracer.go @@ -0,0 +1,1170 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package apm + +import ( + "bytes" + "compress/zlib" + "context" + "io" + "log" + "math/rand" + "sync" + "sync/atomic" + "time" + + "go.elastic.co/apm/apmconfig" + "go.elastic.co/apm/internal/apmlog" + "go.elastic.co/apm/internal/configutil" + "go.elastic.co/apm/internal/iochan" + "go.elastic.co/apm/internal/ringbuffer" + "go.elastic.co/apm/internal/wildcard" + "go.elastic.co/apm/model" + "go.elastic.co/apm/stacktrace" + "go.elastic.co/apm/transport" + "go.elastic.co/fastjson" +) + +const ( + defaultPreContext = 3 + defaultPostContext = 3 + gracePeriodJitter = 0.1 // +/- 10% + tracerEventChannelCap = 1000 +) + +var ( + // DefaultTracer is the default global Tracer, set at package + // initialization time, configured via environment variables. + // + // This will always be initialized to a non-nil value. If any + // of the environment variables are invalid, the corresponding + // errors will be logged to stderr and the default values will + // be used instead. + DefaultTracer *Tracer +) + +func init() { + var opts TracerOptions + opts.initDefaults(true) + DefaultTracer = newTracer(opts) +} + +// TracerOptions holds initial tracer options, for passing to NewTracerOptions. +type TracerOptions struct { + // ServiceName holds the service name. + // + // If ServiceName is empty, the service name will be defined using the + // ELASTIC_APM_SERVICE_NAME environment variable, or if that is not set, + // the executable name. + ServiceName string + + // ServiceVersion holds the service version. + // + // If ServiceVersion is empty, the service version will be defined using + // the ELASTIC_APM_SERVICE_VERSION environment variable. + ServiceVersion string + + // ServiceEnvironment holds the service environment. + // + // If ServiceEnvironment is empty, the service environment will be defined + // using the ELASTIC_APM_ENVIRONMENT environment variable. + ServiceEnvironment string + + // Transport holds the transport to use for sending events. 
+ // + // If Transport is nil, transport.Default will be used. + // + // If Transport implements apmconfig.Watcher, the tracer will begin watching + // for remote changes immediately. This behaviour can be disabled by setting + // the environment variable ELASTIC_APM_CENTRAL_CONFIG=false. + Transport transport.Transport + + requestDuration time.Duration + metricsInterval time.Duration + maxSpans int + requestSize int + bufferSize int + metricsBufferSize int + sampler Sampler + sanitizedFieldNames wildcard.Matchers + disabledMetrics wildcard.Matchers + captureHeaders bool + captureBody CaptureBodyMode + spanFramesMinDuration time.Duration + stackTraceLimit int + active bool + configWatcher apmconfig.Watcher + breakdownMetrics bool + propagateLegacyHeader bool + profileSender profileSender + cpuProfileInterval time.Duration + cpuProfileDuration time.Duration + heapProfileInterval time.Duration +} + +// initDefaults updates opts with default values. +func (opts *TracerOptions) initDefaults(continueOnError bool) error { + var errs []error + failed := func(err error) bool { + if err == nil { + return false + } + errs = append(errs, err) + return true + } + + requestDuration, err := initialRequestDuration() + if failed(err) { + requestDuration = defaultAPIRequestTime + } + + metricsInterval, err := initialMetricsInterval() + if err != nil { + metricsInterval = defaultMetricsInterval + errs = append(errs, err) + } + + requestSize, err := initialAPIRequestSize() + if err != nil { + requestSize = int(defaultAPIRequestSize) + errs = append(errs, err) + } + + bufferSize, err := initialAPIBufferSize() + if err != nil { + bufferSize = int(defaultAPIBufferSize) + errs = append(errs, err) + } + + metricsBufferSize, err := initialMetricsBufferSize() + if err != nil { + metricsBufferSize = int(defaultMetricsBufferSize) + errs = append(errs, err) + } + + maxSpans, err := initialMaxSpans() + if failed(err) { + maxSpans = defaultMaxSpans + } + + sampler, err := initialSampler() + if 
failed(err) { + sampler = nil + } + + captureHeaders, err := initialCaptureHeaders() + if failed(err) { + captureHeaders = defaultCaptureHeaders + } + + captureBody, err := initialCaptureBody() + if failed(err) { + captureBody = CaptureBodyOff + } + + spanFramesMinDuration, err := initialSpanFramesMinDuration() + if failed(err) { + spanFramesMinDuration = defaultSpanFramesMinDuration + } + + stackTraceLimit, err := initialStackTraceLimit() + if failed(err) { + stackTraceLimit = defaultStackTraceLimit + } + + active, err := initialActive() + if failed(err) { + active = true + } + + centralConfigEnabled, err := initialCentralConfigEnabled() + if failed(err) { + centralConfigEnabled = true + } + + breakdownMetricsEnabled, err := initialBreakdownMetricsEnabled() + if failed(err) { + breakdownMetricsEnabled = true + } + + propagateLegacyHeader, err := initialUseElasticTraceparentHeader() + if failed(err) { + propagateLegacyHeader = true + } + + cpuProfileInterval, cpuProfileDuration, err := initialCPUProfileIntervalDuration() + if failed(err) { + cpuProfileInterval = 0 + cpuProfileDuration = 0 + } + heapProfileInterval, err := initialHeapProfileInterval() + if failed(err) { + heapProfileInterval = 0 + } + + if opts.ServiceName != "" { + err := validateServiceName(opts.ServiceName) + if failed(err) { + opts.ServiceName = "" + } + } + + if len(errs) != 0 && !continueOnError { + return errs[0] + } + for _, err := range errs { + log.Printf("[apm]: %s", err) + } + + opts.requestDuration = requestDuration + opts.metricsInterval = metricsInterval + opts.requestSize = requestSize + opts.bufferSize = bufferSize + opts.metricsBufferSize = metricsBufferSize + opts.maxSpans = maxSpans + opts.sampler = sampler + opts.sanitizedFieldNames = initialSanitizedFieldNames() + opts.disabledMetrics = initialDisabledMetrics() + opts.breakdownMetrics = breakdownMetricsEnabled + opts.captureHeaders = captureHeaders + opts.captureBody = captureBody + opts.spanFramesMinDuration = 
spanFramesMinDuration + opts.stackTraceLimit = stackTraceLimit + opts.active = active + opts.propagateLegacyHeader = propagateLegacyHeader + if opts.Transport == nil { + opts.Transport = transport.Default + } + if centralConfigEnabled { + if cw, ok := opts.Transport.(apmconfig.Watcher); ok { + opts.configWatcher = cw + } + } + if ps, ok := opts.Transport.(profileSender); ok { + opts.profileSender = ps + opts.cpuProfileInterval = cpuProfileInterval + opts.cpuProfileDuration = cpuProfileDuration + opts.heapProfileInterval = heapProfileInterval + } + + serviceName, serviceVersion, serviceEnvironment := initialService() + if opts.ServiceName == "" { + opts.ServiceName = serviceName + } + if opts.ServiceVersion == "" { + opts.ServiceVersion = serviceVersion + } + if opts.ServiceEnvironment == "" { + opts.ServiceEnvironment = serviceEnvironment + } + return nil +} + +// Tracer manages the sampling and sending of transactions to +// Elastic APM. +// +// Transactions are buffered until they are flushed (forcibly +// with a Flush call, or when the flush timer expires), or when +// the maximum transaction queue size is reached. Failure to +// send will be periodically retried. Once the queue limit has +// been reached, new transactions will replace older ones in +// the queue. +// +// Errors are sent as soon as possible, but will buffered and +// later sent in bulk if the tracer is busy, or otherwise cannot +// send to the server, e.g. due to network failure. There is +// a limit to the number of errors that will be buffered, and +// once that limit has been reached, new errors will be dropped +// until the queue is drained. +// +// The exported fields be altered or replaced any time up until +// any Tracer methods have been invoked. 
+type Tracer struct { + Transport transport.Transport + Service struct { + Name string + Version string + Environment string + } + + process *model.Process + system *model.System + + active int32 + bufferSize int + metricsBufferSize int + closing chan struct{} + closed chan struct{} + forceFlush chan chan<- struct{} + forceSendMetrics chan chan<- struct{} + configCommands chan tracerConfigCommand + configWatcher chan apmconfig.Watcher + events chan tracerEvent + breakdownMetrics *breakdownMetrics + profileSender profileSender + + statsMu sync.Mutex + stats TracerStats + + // instrumentationConfig_ must only be accessed and mutated + // using Tracer.instrumentationConfig() and Tracer.setInstrumentationConfig(). + instrumentationConfigInternal *instrumentationConfig + + errorDataPool sync.Pool + spanDataPool sync.Pool + transactionDataPool sync.Pool +} + +// NewTracer returns a new Tracer, using the default transport, +// and with the specified service name and version if specified. +// This is equivalent to calling NewTracerOptions with a +// TracerOptions having ServiceName and ServiceVersion set to +// the provided arguments. +func NewTracer(serviceName, serviceVersion string) (*Tracer, error) { + return NewTracerOptions(TracerOptions{ + ServiceName: serviceName, + ServiceVersion: serviceVersion, + }) +} + +// NewTracerOptions returns a new Tracer using the provided options. +// See TracerOptions for details on the options, and their default +// values. 
+func NewTracerOptions(opts TracerOptions) (*Tracer, error) { + if err := opts.initDefaults(false); err != nil { + return nil, err + } + return newTracer(opts), nil +} + +func newTracer(opts TracerOptions) *Tracer { + t := &Tracer{ + Transport: opts.Transport, + process: ¤tProcess, + system: &localSystem, + closing: make(chan struct{}), + closed: make(chan struct{}), + forceFlush: make(chan chan<- struct{}), + forceSendMetrics: make(chan chan<- struct{}), + configCommands: make(chan tracerConfigCommand), + configWatcher: make(chan apmconfig.Watcher), + events: make(chan tracerEvent, tracerEventChannelCap), + active: 1, + breakdownMetrics: newBreakdownMetrics(), + bufferSize: opts.bufferSize, + metricsBufferSize: opts.metricsBufferSize, + profileSender: opts.profileSender, + instrumentationConfigInternal: &instrumentationConfig{ + local: make(map[string]func(*instrumentationConfigValues)), + }, + } + t.Service.Name = opts.ServiceName + t.Service.Version = opts.ServiceVersion + t.Service.Environment = opts.ServiceEnvironment + t.breakdownMetrics.enabled = opts.breakdownMetrics + + // Initialise local transaction config. 
+ t.setLocalInstrumentationConfig(envCaptureBody, func(cfg *instrumentationConfigValues) { + cfg.captureBody = opts.captureBody + }) + t.setLocalInstrumentationConfig(envCaptureHeaders, func(cfg *instrumentationConfigValues) { + cfg.captureHeaders = opts.captureHeaders + }) + t.setLocalInstrumentationConfig(envMaxSpans, func(cfg *instrumentationConfigValues) { + cfg.maxSpans = opts.maxSpans + }) + t.setLocalInstrumentationConfig(envTransactionSampleRate, func(cfg *instrumentationConfigValues) { + cfg.sampler = opts.sampler + }) + t.setLocalInstrumentationConfig(envSpanFramesMinDuration, func(cfg *instrumentationConfigValues) { + cfg.spanFramesMinDuration = opts.spanFramesMinDuration + }) + t.setLocalInstrumentationConfig(envStackTraceLimit, func(cfg *instrumentationConfigValues) { + cfg.stackTraceLimit = opts.stackTraceLimit + }) + t.setLocalInstrumentationConfig(envUseElasticTraceparentHeader, func(cfg *instrumentationConfigValues) { + cfg.propagateLegacyHeader = opts.propagateLegacyHeader + }) + + if !opts.active { + t.active = 0 + close(t.closed) + return t + } + + go t.loop() + t.configCommands <- func(cfg *tracerConfig) { + cfg.cpuProfileInterval = opts.cpuProfileInterval + cfg.cpuProfileDuration = opts.cpuProfileDuration + cfg.heapProfileInterval = opts.heapProfileInterval + cfg.metricsInterval = opts.metricsInterval + cfg.requestDuration = opts.requestDuration + cfg.requestSize = opts.requestSize + cfg.sanitizedFieldNames = opts.sanitizedFieldNames + cfg.disabledMetrics = opts.disabledMetrics + cfg.preContext = defaultPreContext + cfg.postContext = defaultPostContext + cfg.metricsGatherers = []MetricsGatherer{newBuiltinMetricsGatherer(t)} + if apmlog.DefaultLogger != nil { + cfg.logger = apmlog.DefaultLogger + } + } + if opts.configWatcher != nil { + t.configWatcher <- opts.configWatcher + } + return t +} + +// tracerConfig holds the tracer's runtime configuration, which may be modified +// by sending a tracerConfigCommand to the tracer's configCommands 
channel. +type tracerConfig struct { + requestSize int + requestDuration time.Duration + metricsInterval time.Duration + logger WarningLogger + metricsGatherers []MetricsGatherer + contextSetter stacktrace.ContextSetter + preContext, postContext int + sanitizedFieldNames wildcard.Matchers + disabledMetrics wildcard.Matchers + cpuProfileDuration time.Duration + cpuProfileInterval time.Duration + heapProfileInterval time.Duration +} + +type tracerConfigCommand func(*tracerConfig) + +// Close closes the Tracer, preventing transactions from being +// sent to the APM server. +func (t *Tracer) Close() { + select { + case <-t.closing: + default: + close(t.closing) + } + <-t.closed +} + +// Flush waits for the Tracer to flush any transactions and errors it currently +// has queued to the APM server, the tracer is stopped, or the abort channel +// is signaled. +func (t *Tracer) Flush(abort <-chan struct{}) { + flushed := make(chan struct{}, 1) + select { + case t.forceFlush <- flushed: + select { + case <-abort: + case <-flushed: + case <-t.closed: + } + case <-t.closed: + } +} + +// Active reports whether the tracer is active. If the tracer is inactive, +// no transactions or errors will be sent to the Elastic APM server. +func (t *Tracer) Active() bool { + return atomic.LoadInt32(&t.active) == 1 +} + +// SetRequestDuration sets the maximum amount of time to keep a request open +// to the APM server for streaming data before closing the stream and starting +// a new request. +func (t *Tracer) SetRequestDuration(d time.Duration) { + t.sendConfigCommand(func(cfg *tracerConfig) { + cfg.requestDuration = d + }) +} + +// SetMetricsInterval sets the metrics interval -- the amount of time in +// between metrics samples being gathered. +func (t *Tracer) SetMetricsInterval(d time.Duration) { + t.sendConfigCommand(func(cfg *tracerConfig) { + cfg.metricsInterval = d + }) +} + +// SetContextSetter sets the stacktrace.ContextSetter to be used for +// setting stacktrace source context. 
If nil (which is the initial +// value), no context will be set. +func (t *Tracer) SetContextSetter(setter stacktrace.ContextSetter) { + t.sendConfigCommand(func(cfg *tracerConfig) { + cfg.contextSetter = setter + }) +} + +// SetLogger sets the Logger to be used for logging the operation of +// the tracer. +// +// If logger implements WarningLogger, its Warningf method will be used +// for logging warnings. Otherwise, warnings will logged using Debugf. +// +// The tracer is initialized with a default logger configured with the +// environment variables ELASTIC_APM_LOG_FILE and ELASTIC_APM_LOG_LEVEL. +// Calling SetLogger will replace the default logger. +func (t *Tracer) SetLogger(logger Logger) { + t.sendConfigCommand(func(cfg *tracerConfig) { + cfg.logger = makeWarningLogger(logger) + }) +} + +// SetSanitizedFieldNames sets the wildcard patterns that will be used to +// match cookie and form field names for sanitization. Fields matching any +// of the the supplied patterns will have their values redacted. If +// SetSanitizedFieldNames is called with no arguments, then no fields +// will be redacted. +func (t *Tracer) SetSanitizedFieldNames(patterns ...string) error { + var matchers wildcard.Matchers + if len(patterns) != 0 { + matchers = make(wildcard.Matchers, len(patterns)) + for i, p := range patterns { + matchers[i] = configutil.ParseWildcardPattern(p) + } + } + t.sendConfigCommand(func(cfg *tracerConfig) { + cfg.sanitizedFieldNames = matchers + }) + return nil +} + +// RegisterMetricsGatherer registers g for periodic (or forced) metrics +// gathering by t. +// +// RegisterMetricsGatherer returns a function which will deregister g. +// It may safely be called multiple times. +func (t *Tracer) RegisterMetricsGatherer(g MetricsGatherer) func() { + // Wrap g in a pointer-to-struct, so we can safely compare. 
+ wrapped := &struct{ MetricsGatherer }{MetricsGatherer: g} + t.sendConfigCommand(func(cfg *tracerConfig) { + cfg.metricsGatherers = append(cfg.metricsGatherers, wrapped) + }) + deregister := func(cfg *tracerConfig) { + for i, g := range cfg.metricsGatherers { + if g != wrapped { + continue + } + cfg.metricsGatherers = append(cfg.metricsGatherers[:i], cfg.metricsGatherers[i+1:]...) + } + } + var once sync.Once + return func() { + once.Do(func() { + t.sendConfigCommand(deregister) + }) + } +} + +// SetConfigWatcher sets w as the config watcher. +// +// By default, the tracer will be configured to use the transport for +// watching config, if the transport implements apmconfig.Watcher. This +// can be overridden by calling SetConfigWatcher. +// +// If w is nil, config watching will be stopped. +// +// Calling SetConfigWatcher will discard any previously observed remote +// config, reverting to local config until a config change from w is +// observed. +func (t *Tracer) SetConfigWatcher(w apmconfig.Watcher) { + select { + case t.configWatcher <- w: + case <-t.closing: + case <-t.closed: + } +} + +func (t *Tracer) sendConfigCommand(cmd tracerConfigCommand) { + select { + case t.configCommands <- cmd: + case <-t.closing: + case <-t.closed: + } +} + +// SetSampler sets the sampler the tracer. +// +// It is valid to pass nil, in which case all transactions will be sampled. +// +// Configuration via Kibana takes precedence over local configuration, so +// if sampling has been configured via Kibana, this call will not have any +// effect until/unless that configuration has been removed. +func (t *Tracer) SetSampler(s Sampler) { + t.setLocalInstrumentationConfig(envTransactionSampleRate, func(cfg *instrumentationConfigValues) { + cfg.sampler = s + }) +} + +// SetMaxSpans sets the maximum number of spans that will be added +// to a transaction before dropping spans. 
+// +// Passing in zero will disable all spans, while negative values will +// permit an unlimited number of spans. +func (t *Tracer) SetMaxSpans(n int) { + t.setLocalInstrumentationConfig(envMaxSpans, func(cfg *instrumentationConfigValues) { + cfg.maxSpans = n + }) +} + +// SetSpanFramesMinDuration sets the minimum duration for a span after which +// we will capture its stack frames. +func (t *Tracer) SetSpanFramesMinDuration(d time.Duration) { + t.setLocalInstrumentationConfig(envMaxSpans, func(cfg *instrumentationConfigValues) { + cfg.spanFramesMinDuration = d + }) +} + +// SetStackTraceLimit sets the the maximum number of stack frames to collect +// for each stack trace. If limit is negative, then all frames will be collected. +func (t *Tracer) SetStackTraceLimit(limit int) { + t.setLocalInstrumentationConfig(envMaxSpans, func(cfg *instrumentationConfigValues) { + cfg.stackTraceLimit = limit + }) +} + +// SetCaptureHeaders enables or disables capturing of HTTP headers. +func (t *Tracer) SetCaptureHeaders(capture bool) { + t.setLocalInstrumentationConfig(envMaxSpans, func(cfg *instrumentationConfigValues) { + cfg.captureHeaders = capture + }) +} + +// SetCaptureBody sets the HTTP request body capture mode. +func (t *Tracer) SetCaptureBody(mode CaptureBodyMode) { + t.setLocalInstrumentationConfig(envMaxSpans, func(cfg *instrumentationConfigValues) { + cfg.captureBody = mode + }) +} + +// SendMetrics forces the tracer to gather and send metrics immediately, +// blocking until the metrics have been sent or the abort channel is +// signalled. +func (t *Tracer) SendMetrics(abort <-chan struct{}) { + sent := make(chan struct{}, 1) + select { + case t.forceSendMetrics <- sent: + select { + case <-abort: + case <-sent: + case <-t.closed: + } + case <-t.closed: + } +} + +// Stats returns the current TracerStats. This will return the most +// recent values even after the tracer has been closed. 
+func (t *Tracer) Stats() TracerStats { + t.statsMu.Lock() + stats := t.stats + t.statsMu.Unlock() + return stats +} + +func (t *Tracer) loop() { + ctx, cancelContext := context.WithCancel(context.Background()) + defer cancelContext() + defer close(t.closed) + defer atomic.StoreInt32(&t.active, 0) + + var req iochan.ReadRequest + var requestBuf bytes.Buffer + var metadata []byte + var gracePeriod time.Duration = -1 + var flushed chan<- struct{} + var requestBufTransactions, requestBufSpans, requestBufErrors, requestBufMetricsets uint64 + zlibWriter, _ := zlib.NewWriterLevel(&requestBuf, zlib.BestSpeed) + zlibFlushed := true + zlibClosed := false + iochanReader := iochan.NewReader() + requestBytesRead := 0 + requestActive := false + closeRequest := false + flushRequest := false + requestResult := make(chan error, 1) + requestTimer := time.NewTimer(0) + requestTimerActive := false + if !requestTimer.Stop() { + <-requestTimer.C + } + + // Run another goroutine to perform the blocking requests, + // communicating with the tracer loop to obtain stream data. 
+ sendStreamRequest := make(chan time.Duration) + defer close(sendStreamRequest) + go func() { + jitterRand := rand.New(rand.NewSource(time.Now().UnixNano())) + for gracePeriod := range sendStreamRequest { + if gracePeriod > 0 { + select { + case <-time.After(jitterDuration(gracePeriod, jitterRand, gracePeriodJitter)): + case <-ctx.Done(): + } + } + requestResult <- t.Transport.SendStream(ctx, iochanReader) + } + }() + + var breakdownMetricsLimitWarningLogged bool + var stats TracerStats + var metrics Metrics + var sentMetrics chan<- struct{} + var gatheringMetrics bool + var metricsTimerStart time.Time + metricsBuffer := ringbuffer.New(t.metricsBufferSize) + gatheredMetrics := make(chan struct{}, 1) + metricsTimer := time.NewTimer(0) + if !metricsTimer.Stop() { + <-metricsTimer.C + } + + var lastConfigChange map[string]string + var configChanges <-chan apmconfig.Change + var stopConfigWatcher func() + defer func() { + if stopConfigWatcher != nil { + stopConfigWatcher() + } + }() + + cpuProfilingState := newCPUProfilingState(t.profileSender) + heapProfilingState := newHeapProfilingState(t.profileSender) + + var cfg tracerConfig + buffer := ringbuffer.New(t.bufferSize) + buffer.Evicted = func(h ringbuffer.BlockHeader) { + switch h.Tag { + case errorBlockTag: + stats.ErrorsDropped++ + case spanBlockTag: + stats.SpansDropped++ + case transactionBlockTag: + stats.TransactionsDropped++ + } + } + modelWriter := modelWriter{ + buffer: buffer, + metricsBuffer: metricsBuffer, + cfg: &cfg, + stats: &stats, + } + + for { + var gatherMetrics bool + select { + case <-t.closing: + cancelContext() // informs transport that EOF is expected + iochanReader.CloseRead(io.EOF) + return + case cmd := <-t.configCommands: + oldMetricsInterval := cfg.metricsInterval + cmd(&cfg) + cpuProfilingState.updateConfig(cfg.cpuProfileInterval, cfg.cpuProfileDuration) + heapProfilingState.updateConfig(cfg.heapProfileInterval, 0) + if !gatheringMetrics && cfg.metricsInterval != oldMetricsInterval { + 
if metricsTimerStart.IsZero() { + if cfg.metricsInterval > 0 { + metricsTimer.Reset(cfg.metricsInterval) + metricsTimerStart = time.Now() + } + } else { + if cfg.metricsInterval <= 0 { + metricsTimerStart = time.Time{} + if !metricsTimer.Stop() { + <-metricsTimer.C + } + } else { + alreadyPassed := time.Since(metricsTimerStart) + if alreadyPassed >= cfg.metricsInterval { + metricsTimer.Reset(0) + } else { + metricsTimer.Reset(cfg.metricsInterval - alreadyPassed) + } + } + } + } + continue + case cw := <-t.configWatcher: + if configChanges != nil { + stopConfigWatcher() + t.updateRemoteConfig(cfg.logger, lastConfigChange, nil) + lastConfigChange = nil + configChanges = nil + } + if cw == nil { + continue + } + var configWatcherContext context.Context + var watchParams apmconfig.WatchParams + watchParams.Service.Name = t.Service.Name + watchParams.Service.Environment = t.Service.Environment + configWatcherContext, stopConfigWatcher = context.WithCancel(ctx) + configChanges = cw.WatchConfig(configWatcherContext, watchParams) + // Silence go vet's "possible context leak" false positive. + // We call a previous stopConfigWatcher before reassigning + // the variable, and we have a defer at the top level of the + // loop method that will call the final stopConfigWatcher + // value on method exit. 
+ _ = stopConfigWatcher + continue + case change, ok := <-configChanges: + if !ok { + configChanges = nil + continue + } + if change.Err != nil { + if cfg.logger != nil { + cfg.logger.Errorf("config request failed: %s", change.Err) + } + } else { + t.updateRemoteConfig(cfg.logger, lastConfigChange, change.Attrs) + lastConfigChange = change.Attrs + } + continue + case event := <-t.events: + switch event.eventType { + case transactionEvent: + if !t.breakdownMetrics.recordTransaction(event.tx.TransactionData) { + if !breakdownMetricsLimitWarningLogged && cfg.logger != nil { + cfg.logger.Warningf("%s", breakdownMetricsLimitWarning) + breakdownMetricsLimitWarningLogged = true + } + } + modelWriter.writeTransaction(event.tx.Transaction, event.tx.TransactionData) + case spanEvent: + modelWriter.writeSpan(event.span.Span, event.span.SpanData) + case errorEvent: + modelWriter.writeError(event.err) + // Flush the buffer to transmit the error immediately. + flushRequest = true + } + case <-requestTimer.C: + requestTimerActive = false + closeRequest = true + case <-metricsTimer.C: + metricsTimerStart = time.Time{} + gatherMetrics = !gatheringMetrics + case sentMetrics = <-t.forceSendMetrics: + if !metricsTimerStart.IsZero() { + if !metricsTimer.Stop() { + <-metricsTimer.C + } + metricsTimerStart = time.Time{} + } + gatherMetrics = !gatheringMetrics + case <-gatheredMetrics: + modelWriter.writeMetrics(&metrics) + gatheringMetrics = false + flushRequest = true + if cfg.metricsInterval > 0 { + metricsTimerStart = time.Now() + metricsTimer.Reset(cfg.metricsInterval) + } + case <-cpuProfilingState.timer.C: + cpuProfilingState.start(ctx, cfg.logger, t.metadataReader()) + case <-cpuProfilingState.finished: + cpuProfilingState.resetTimer() + case <-heapProfilingState.timer.C: + heapProfilingState.start(ctx, cfg.logger, t.metadataReader()) + case <-heapProfilingState.finished: + heapProfilingState.resetTimer() + case flushed = <-t.forceFlush: + // Drain any objects buffered in the 
channels. + for n := len(t.events); n > 0; n-- { + event := <-t.events + switch event.eventType { + case transactionEvent: + if !t.breakdownMetrics.recordTransaction(event.tx.TransactionData) { + if !breakdownMetricsLimitWarningLogged && cfg.logger != nil { + cfg.logger.Warningf("%s", breakdownMetricsLimitWarning) + breakdownMetricsLimitWarningLogged = true + } + } + modelWriter.writeTransaction(event.tx.Transaction, event.tx.TransactionData) + case spanEvent: + modelWriter.writeSpan(event.span.Span, event.span.SpanData) + case errorEvent: + modelWriter.writeError(event.err) + } + } + if !requestActive && buffer.Len() == 0 && metricsBuffer.Len() == 0 { + flushed <- struct{}{} + continue + } + closeRequest = true + case req = <-iochanReader.C: + case err := <-requestResult: + if err != nil { + stats.Errors.SendStream++ + gracePeriod = nextGracePeriod(gracePeriod) + if cfg.logger != nil { + logf := cfg.logger.Debugf + if err, ok := err.(*transport.HTTPError); ok && err.Response.StatusCode == 404 { + // 404 typically means the server is too old, meaning + // the error is due to a misconfigured environment. + logf = cfg.logger.Errorf + } + logf("request failed: %s (next request in ~%s)", err, gracePeriod) + } + } else { + gracePeriod = -1 // Reset grace period after success. 
+ stats.TransactionsSent += requestBufTransactions + stats.SpansSent += requestBufSpans + stats.ErrorsSent += requestBufErrors + if cfg.logger != nil { + s := func(n uint64) string { + if n != 1 { + return "s" + } + return "" + } + cfg.logger.Debugf( + "sent request with %d transaction%s, %d span%s, %d error%s, %d metricset%s", + requestBufTransactions, s(requestBufTransactions), + requestBufSpans, s(requestBufSpans), + requestBufErrors, s(requestBufErrors), + requestBufMetricsets, s(requestBufMetricsets), + ) + } + } + if !stats.isZero() { + t.statsMu.Lock() + t.stats.accumulate(stats) + t.statsMu.Unlock() + stats = TracerStats{} + } + if sentMetrics != nil && requestBufMetricsets > 0 { + sentMetrics <- struct{}{} + sentMetrics = nil + } + if flushed != nil { + flushed <- struct{}{} + flushed = nil + } + if req.Buf != nil { + // req will be canceled by CloseRead below. + req.Buf = nil + } + iochanReader.CloseRead(io.EOF) + iochanReader = iochan.NewReader() + flushRequest = false + closeRequest = false + requestActive = false + requestBytesRead = 0 + requestBuf.Reset() + requestBufTransactions = 0 + requestBufSpans = 0 + requestBufErrors = 0 + requestBufMetricsets = 0 + if requestTimerActive { + if !requestTimer.Stop() { + <-requestTimer.C + } + requestTimerActive = false + } + } + + if !stats.isZero() { + t.statsMu.Lock() + t.stats.accumulate(stats) + t.statsMu.Unlock() + stats = TracerStats{} + } + + if gatherMetrics { + gatheringMetrics = true + metrics.disabled = cfg.disabledMetrics + t.gatherMetrics(ctx, cfg.metricsGatherers, &metrics, cfg.logger, gatheredMetrics) + if cfg.logger != nil { + cfg.logger.Debugf("gathering metrics") + } + } + + if !requestActive { + if buffer.Len() == 0 && metricsBuffer.Len() == 0 { + continue + } + sendStreamRequest <- gracePeriod + if metadata == nil { + metadata = t.jsonRequestMetadata() + } + zlibWriter.Reset(&requestBuf) + zlibWriter.Write(metadata) + zlibFlushed = false + zlibClosed = false + requestActive = true + 
requestTimer.Reset(cfg.requestDuration) + requestTimerActive = true + } + + if !closeRequest || !zlibClosed { + for requestBytesRead+requestBuf.Len() < cfg.requestSize { + if metricsBuffer.Len() > 0 { + if _, _, err := metricsBuffer.WriteBlockTo(zlibWriter); err == nil { + requestBufMetricsets++ + zlibWriter.Write([]byte("\n")) + zlibFlushed = false + if sentMetrics != nil { + // SendMetrics was called: close the request + // off so we can inform the user when the + // metrics have been processed. + closeRequest = true + } + } + continue + } + if buffer.Len() == 0 { + break + } + if h, _, err := buffer.WriteBlockTo(zlibWriter); err == nil { + switch h.Tag { + case transactionBlockTag: + requestBufTransactions++ + case spanBlockTag: + requestBufSpans++ + case errorBlockTag: + requestBufErrors++ + } + zlibWriter.Write([]byte("\n")) + zlibFlushed = false + } + } + if !closeRequest { + closeRequest = requestBytesRead+requestBuf.Len() >= cfg.requestSize + } + } + if closeRequest { + if !zlibClosed { + zlibWriter.Close() + zlibClosed = true + } + } else if flushRequest && !zlibFlushed { + zlibWriter.Flush() + flushRequest = false + zlibFlushed = true + } + + if req.Buf == nil || requestBuf.Len() == 0 { + continue + } + const zlibHeaderLen = 2 + if requestBytesRead+requestBuf.Len() > zlibHeaderLen { + n, err := requestBuf.Read(req.Buf) + if closeRequest && err == nil && requestBuf.Len() == 0 { + err = io.EOF + } + req.Respond(n, err) + req.Buf = nil + if n > 0 { + requestBytesRead += n + } + } + } +} + +// jsonRequestMetadata returns a JSON-encoded metadata object that features +// at the head of every request body. This is called exactly once, when the +// first request is made. 
+func (t *Tracer) jsonRequestMetadata() []byte { + var json fastjson.Writer + json.RawString(`{"metadata":`) + t.encodeRequestMetadata(&json) + json.RawString("}\n") + return json.Bytes() +} + +// metadataReader returns an io.Reader that holds the JSON-encoded metadata, +// suitable for including in a profile request. +func (t *Tracer) metadataReader() io.Reader { + var metadata fastjson.Writer + t.encodeRequestMetadata(&metadata) + return bytes.NewReader(metadata.Bytes()) +} + +func (t *Tracer) encodeRequestMetadata(json *fastjson.Writer) { + service := makeService(t.Service.Name, t.Service.Version, t.Service.Environment) + json.RawString(`{"system":`) + t.system.MarshalFastJSON(json) + json.RawString(`,"process":`) + t.process.MarshalFastJSON(json) + json.RawString(`,"service":`) + service.MarshalFastJSON(json) + if len(globalLabels) > 0 { + json.RawString(`,"labels":`) + globalLabels.MarshalFastJSON(json) + } + json.RawByte('}') +} + +// gatherMetrics gathers metrics from each of the registered +// metrics gatherers. Once all gatherers have returned, a value +// will be sent on the "gathered" channel. +func (t *Tracer) gatherMetrics(ctx context.Context, gatherers []MetricsGatherer, m *Metrics, l Logger, gathered chan<- struct{}) { + timestamp := model.Time(time.Now().UTC()) + var group sync.WaitGroup + for _, g := range gatherers { + group.Add(1) + go func(g MetricsGatherer) { + defer group.Done() + gatherMetrics(ctx, g, m, l) + }(g) + } + go func() { + group.Wait() + for _, m := range m.transactionGroupMetrics { + m.Timestamp = timestamp + } + for _, m := range m.metrics { + m.Timestamp = timestamp + } + gathered <- struct{}{} + }() +} + +type tracerEventType int + +const ( + transactionEvent tracerEventType = iota + spanEvent + errorEvent +) + +type tracerEvent struct { + eventType tracerEventType + + // err is set only if eventType == errorEvent. + err *ErrorData + + // tx is set only if eventType == transactionEvent. 
+ tx struct { + *Transaction + // Transaction.TransactionData is nil at the + // point tracerEvent is created (to signify + // that the transaction is ended), so we pass + // it along side. + *TransactionData + } + + // span is set only if eventType == spanEvent. + span struct { + *Span + // Span.SpanData is nil at the point tracerEvent + // is created (to signify that the span is ended), + // so we pass it along side. + *SpanData + } +} diff --git a/vendor/go.elastic.co/apm/tracer_stats.go b/vendor/go.elastic.co/apm/tracer_stats.go new file mode 100644 index 00000000000..6e1436e8598 --- /dev/null +++ b/vendor/go.elastic.co/apm/tracer_stats.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +// TracerStats holds statistics for a Tracer. +type TracerStats struct { + Errors TracerStatsErrors + ErrorsSent uint64 + ErrorsDropped uint64 + TransactionsSent uint64 + TransactionsDropped uint64 + SpansSent uint64 + SpansDropped uint64 +} + +// TracerStatsErrors holds error statistics for a Tracer. 
+type TracerStatsErrors struct { + SetContext uint64 + SendStream uint64 +} + +func (s TracerStats) isZero() bool { + return s == TracerStats{} +} + +// accumulate updates the stats by accumulating them with +// the values in rhs. +func (s *TracerStats) accumulate(rhs TracerStats) { + s.Errors.SetContext += rhs.Errors.SetContext + s.Errors.SendStream += rhs.Errors.SendStream + s.ErrorsSent += rhs.ErrorsSent + s.ErrorsDropped += rhs.ErrorsDropped + s.SpansSent += rhs.SpansSent + s.SpansDropped += rhs.SpansDropped + s.TransactionsSent += rhs.TransactionsSent + s.TransactionsDropped += rhs.TransactionsDropped +} diff --git a/vendor/go.elastic.co/apm/transaction.go b/vendor/go.elastic.co/apm/transaction.go new file mode 100644 index 00000000000..1e2ce56e9d5 --- /dev/null +++ b/vendor/go.elastic.co/apm/transaction.go @@ -0,0 +1,324 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +import ( + cryptorand "crypto/rand" + "encoding/binary" + "math/rand" + "sync" + "time" +) + +// StartTransaction returns a new Transaction with the specified +// name and type, and with the start time set to the current time. +// This is equivalent to calling StartTransactionOptions with a +// zero TransactionOptions. 
+func (t *Tracer) StartTransaction(name, transactionType string) *Transaction { + return t.StartTransactionOptions(name, transactionType, TransactionOptions{}) +} + +// StartTransactionOptions returns a new Transaction with the +// specified name, type, and options. +func (t *Tracer) StartTransactionOptions(name, transactionType string, opts TransactionOptions) *Transaction { + td, _ := t.transactionDataPool.Get().(*TransactionData) + if td == nil { + td = &TransactionData{ + Duration: -1, + Context: Context{ + captureBodyMask: CaptureBodyTransactions, + }, + spanTimings: make(spanTimingsMap), + } + var seed int64 + if err := binary.Read(cryptorand.Reader, binary.LittleEndian, &seed); err != nil { + seed = time.Now().UnixNano() + } + td.rand = rand.New(rand.NewSource(seed)) + } + tx := &Transaction{tracer: t, TransactionData: td} + + tx.Name = name + tx.Type = transactionType + + var root bool + if opts.TraceContext.Trace.Validate() == nil { + tx.traceContext.Trace = opts.TraceContext.Trace + tx.traceContext.Options = opts.TraceContext.Options + if opts.TraceContext.Span.Validate() == nil { + tx.parentSpan = opts.TraceContext.Span + } + if opts.TransactionID.Validate() == nil { + tx.traceContext.Span = opts.TransactionID + } else { + binary.LittleEndian.PutUint64(tx.traceContext.Span[:], tx.rand.Uint64()) + } + if opts.TraceContext.State.Validate() == nil { + tx.traceContext.State = opts.TraceContext.State + } + } else { + // Start a new trace. We reuse the trace ID for the root transaction's ID + // if one is not specified in the options. + root = true + binary.LittleEndian.PutUint64(tx.traceContext.Trace[:8], tx.rand.Uint64()) + binary.LittleEndian.PutUint64(tx.traceContext.Trace[8:], tx.rand.Uint64()) + if opts.TransactionID.Validate() == nil { + tx.traceContext.Span = opts.TransactionID + } else { + copy(tx.traceContext.Span[:], tx.traceContext.Trace[:]) + } + } + + // Take a snapshot of config that should apply to all spans within the + // transaction. 
+ instrumentationConfig := t.instrumentationConfig() + tx.maxSpans = instrumentationConfig.maxSpans + tx.spanFramesMinDuration = instrumentationConfig.spanFramesMinDuration + tx.stackTraceLimit = instrumentationConfig.stackTraceLimit + tx.Context.captureHeaders = instrumentationConfig.captureHeaders + tx.breakdownMetricsEnabled = t.breakdownMetrics.enabled + tx.propagateLegacyHeader = instrumentationConfig.propagateLegacyHeader + + if root { + sampler := instrumentationConfig.sampler + if sampler == nil || sampler.Sample(tx.traceContext) { + o := tx.traceContext.Options.WithRecorded(true) + tx.traceContext.Options = o + } + } else { + // TODO(axw) make this behaviour configurable. In some cases + // it may not be a good idea to honour the recorded flag, as + // it may open up the application to DoS by forced sampling. + // Even ignoring bad actors, a service that has many feeder + // applications may end up being sampled at a very high rate. + tx.traceContext.Options = opts.TraceContext.Options + } + tx.timestamp = opts.Start + if tx.timestamp.IsZero() { + tx.timestamp = time.Now() + } + return tx +} + +// TransactionOptions holds options for Tracer.StartTransactionOptions. +type TransactionOptions struct { + // TraceContext holds the TraceContext for a new transaction. If this is + // zero, a new trace will be started. + TraceContext TraceContext + + // TransactionID holds the ID to assign to the transaction. If this is + // zero, a new ID will be generated and used instead. + TransactionID SpanID + + // Start is the start time of the transaction. If this has the + // zero value, time.Now() will be used instead. + Start time.Time +} + +// Transaction describes an event occurring in the monitored service. +type Transaction struct { + tracer *Tracer + traceContext TraceContext + + mu sync.RWMutex + + // TransactionData holds the transaction data. This field is set to + // nil when either of the transaction's End or Discard methods are called. 
+ *TransactionData +} + +// Sampled reports whether or not the transaction is sampled. +func (tx *Transaction) Sampled() bool { + if tx == nil { + return false + } + return tx.traceContext.Options.Recorded() +} + +// TraceContext returns the transaction's TraceContext. +// +// The resulting TraceContext's Span field holds the transaction's ID. +// If tx is nil, a zero (invalid) TraceContext is returned. +func (tx *Transaction) TraceContext() TraceContext { + if tx == nil { + return TraceContext{} + } + return tx.traceContext +} + +// ShouldPropagateLegacyHeader reports whether instrumentation should +// propagate the legacy "Elastic-Apm-Traceparent" header in addition to +// the standard W3C "traceparent" header. +// +// This method will be removed in a future major version when we remove +// support for propagating the legacy header. +func (tx *Transaction) ShouldPropagateLegacyHeader() bool { + tx.mu.Lock() + defer tx.mu.Unlock() + if tx.ended() { + return false + } + return tx.propagateLegacyHeader +} + +// EnsureParent returns the span ID for for tx's parent, generating a +// parent span ID if one has not already been set and tx has not been +// ended. If tx is nil or has been ended, a zero (invalid) SpanID is +// returned. +// +// This method can be used for generating a span ID for the RUM +// (Real User Monitoring) agent, where the RUM agent is initialized +// after the backend service returns. +func (tx *Transaction) EnsureParent() SpanID { + if tx == nil { + return SpanID{} + } + + tx.mu.Lock() + defer tx.mu.Unlock() + if tx.ended() { + return SpanID{} + } + + tx.TransactionData.mu.Lock() + defer tx.TransactionData.mu.Unlock() + if tx.parentSpan.isZero() { + // parentSpan can only be zero if tx is a root transaction + // for which GenerateParentTraceContext() has not previously + // been called. Reuse the latter half of the trace ID for + // the parent span ID; the first half is used for the + // transaction ID. 
+ copy(tx.parentSpan[:], tx.traceContext.Trace[8:]) + } + return tx.parentSpan +} + +// Discard discards a previously started transaction. +// +// Calling Discard will set tx's TransactionData field to nil, so callers must +// ensure tx is not updated after Discard returns. +func (tx *Transaction) Discard() { + tx.mu.Lock() + defer tx.mu.Unlock() + if tx.ended() { + return + } + tx.reset(tx.tracer) +} + +// End enqueues tx for sending to the Elastic APM server. +// +// Calling End will set tx's TransactionData field to nil, so callers +// must ensure tx is not updated after End returns. +// +// If tx.Duration has not been set, End will set it to the elapsed time +// since the transaction's start time. +func (tx *Transaction) End() { + tx.mu.Lock() + defer tx.mu.Unlock() + if tx.ended() { + return + } + if tx.Duration < 0 { + tx.Duration = time.Since(tx.timestamp) + } + tx.enqueue() + tx.TransactionData = nil +} + +func (tx *Transaction) enqueue() { + event := tracerEvent{eventType: transactionEvent} + event.tx.Transaction = tx + event.tx.TransactionData = tx.TransactionData + select { + case tx.tracer.events <- event: + default: + // Enqueuing a transaction should never block. + tx.tracer.breakdownMetrics.recordTransaction(tx.TransactionData) + + // TODO(axw) use an atomic operation to increment. + tx.tracer.statsMu.Lock() + tx.tracer.stats.TransactionsDropped++ + tx.tracer.statsMu.Unlock() + tx.reset(tx.tracer) + } +} + +// ended reports whether or not End or Discard has been called. +// +// This must be called with tx.mu held. +func (tx *Transaction) ended() bool { + return tx.TransactionData == nil +} + +// TransactionData holds the details for a transaction, and is embedded +// inside Transaction. When a transaction is ended, its TransactionData +// field will be set to nil. +type TransactionData struct { + // Name holds the transaction name, initialized with the value + // passed to StartTransaction. 
+ Name string + + // Type holds the transaction type, initialized with the value + // passed to StartTransaction. + Type string + + // Duration holds the transaction duration, initialized to -1. + // + // If you do not update Duration, calling Transaction.End will + // calculate the duration based on the elapsed time since the + // transaction's start time. + Duration time.Duration + + // Context describes the context in which the transaction occurs. + Context Context + + // Result holds the transaction result. + Result string + + maxSpans int + spanFramesMinDuration time.Duration + stackTraceLimit int + breakdownMetricsEnabled bool + propagateLegacyHeader bool + timestamp time.Time + + mu sync.Mutex + spansCreated int + spansDropped int + childrenTimer childrenTimer + spanTimings spanTimingsMap + rand *rand.Rand // for ID generation + // parentSpan holds the transaction's parent ID. It is protected by + // mu, since it can be updated by calling EnsureParent. + parentSpan SpanID +} + +// reset resets the TransactionData back to its zero state and places it back +// into the transaction pool. +func (td *TransactionData) reset(tracer *Tracer) { + *td = TransactionData{ + Context: td.Context, + Duration: -1, + rand: td.rand, + spanTimings: td.spanTimings, + } + td.Context.reset() + td.spanTimings.reset() + tracer.transactionDataPool.Put(td) +} diff --git a/vendor/go.elastic.co/apm/transport/api.go b/vendor/go.elastic.co/apm/transport/api.go new file mode 100644 index 00000000000..a9c2fe263d3 --- /dev/null +++ b/vendor/go.elastic.co/apm/transport/api.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package transport + +import ( + "context" + "io" +) + +// Transport provides an interface for sending streams of encoded model +// entities to the Elastic APM server, and for querying config. Methods +// are not required to be safe for concurrent use. +type Transport interface { + // SendStream sends a data stream to the server, returning when the + // stream has been closed (Read returns io.EOF) or the HTTP request + // terminates. + SendStream(context.Context, io.Reader) error +} diff --git a/vendor/go.elastic.co/apm/transport/default.go b/vendor/go.elastic.co/apm/transport/default.go new file mode 100644 index 00000000000..2a730eda67f --- /dev/null +++ b/vendor/go.elastic.co/apm/transport/default.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package transport + +var ( + // Default is the default Transport, using the + // ELASTIC_APM_* environment variables. + // + // If ELASTIC_APM_SERVER_URL is set to an invalid + // location, Default will be set to a Transport + // returning an error for every operation. + Default Transport + + // Discard is a Transport on which all operations + // succeed without doing anything. + Discard = discardTransport{} +) + +func init() { + _, _ = InitDefault() +} + +// InitDefault (re-)initializes Default, the default Transport, returning +// its new value along with the error that will be returned by the Transport +// if the environment variable configuration is invalid. The result is always +// non-nil. +func InitDefault() (Transport, error) { + t, err := getDefault() + Default = t + return t, err +} + +func getDefault() (Transport, error) { + s, err := NewHTTPTransport() + if err != nil { + return discardTransport{err}, err + } + return s, nil +} diff --git a/vendor/go.elastic.co/apm/transport/discard.go b/vendor/go.elastic.co/apm/transport/discard.go new file mode 100644 index 00000000000..3d61a34da7f --- /dev/null +++ b/vendor/go.elastic.co/apm/transport/discard.go @@ -0,0 +1,31 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package transport + +import ( + "context" + "io" +) + +type discardTransport struct { + err error +} + +func (s discardTransport) SendStream(context.Context, io.Reader) error { + return s.err +} diff --git a/vendor/go.elastic.co/apm/transport/doc.go b/vendor/go.elastic.co/apm/transport/doc.go new file mode 100644 index 00000000000..c5bf29e1909 --- /dev/null +++ b/vendor/go.elastic.co/apm/transport/doc.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package transport provides an interface and implementation +// for transporting data to the Elastic APM server. +package transport diff --git a/vendor/go.elastic.co/apm/transport/http.go b/vendor/go.elastic.co/apm/transport/http.go new file mode 100644 index 00000000000..d084a51d417 --- /dev/null +++ b/vendor/go.elastic.co/apm/transport/http.go @@ -0,0 +1,638 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package transport + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "encoding/pem" + "fmt" + "io" + "io/ioutil" + "math/rand" + "mime/multipart" + "net/http" + "net/textproto" + "net/url" + "os" + "path" + "runtime" + "strconv" + "strings" + "sync/atomic" + "time" + + "github.com/pkg/errors" + + "go.elastic.co/apm/apmconfig" + "go.elastic.co/apm/internal/apmversion" + "go.elastic.co/apm/internal/configutil" +) + +const ( + intakePath = "/intake/v2/events" + profilePath = "/intake/v2/profile" + configPath = "/config/v1/agents" + + envAPIKey = "ELASTIC_APM_API_KEY" + envSecretToken = "ELASTIC_APM_SECRET_TOKEN" + envServerURLs = "ELASTIC_APM_SERVER_URLS" + envServerURL = "ELASTIC_APM_SERVER_URL" + envServerTimeout = "ELASTIC_APM_SERVER_TIMEOUT" + envServerCert = "ELASTIC_APM_SERVER_CERT" + envVerifyServerCert = "ELASTIC_APM_VERIFY_SERVER_CERT" +) + +var ( + // Take a copy of the http.DefaultTransport pointer, + // in case another package replaces the value later. + defaultHTTPTransport = http.DefaultTransport.(*http.Transport) + + defaultServerURL, _ = url.Parse("http://localhost:8200") + defaultServerTimeout = 30 * time.Second +) + +// HTTPTransport is an implementation of Transport, sending payloads via +// a net/http client. 
+type HTTPTransport struct { + // Client exposes the http.Client used by the HTTPTransport for + // sending requests to the APM Server. + Client *http.Client + intakeHeaders http.Header + configHeaders http.Header + profileHeaders http.Header + shuffleRand *rand.Rand + + urlIndex int32 + intakeURLs []*url.URL + configURLs []*url.URL + profileURLs []*url.URL +} + +// NewHTTPTransport returns a new HTTPTransport which can be used for +// streaming data to the APM Server. The returned HTTPTransport will be +// initialized using the following environment variables: +// +// - ELASTIC_APM_SERVER_URLS: a comma-separated list of APM Server URLs. +// The transport will use this list of URLs for sending requests, +// switching to the next URL in the list upon error. The list will be +// shuffled first. If no URLs are specified, then the transport will +// use the default URL "http://localhost:8200". +// +// - ELASTIC_APM_SERVER_TIMEOUT: timeout for requests to the APM Server. +// If not specified, defaults to 30 seconds. +// +// - ELASTIC_APM_SECRET_TOKEN: used to authenticate the agent. +// +// - ELASTIC_APM_SERVER_CERT: path to a PEM-encoded certificate that +// must match the APM Server-supplied certificate. This can be used +// to pin a self signed certificate. If this is set, then +// ELASTIC_APM_VERIFY_SERVER_CERT is ignored. +// +// - ELASTIC_APM_VERIFY_SERVER_CERT: if set to "false", the transport +// will not verify the APM Server's TLS certificate. Only relevant +// when using HTTPS. By default, the transport will verify server +// certificates. 
+// +func NewHTTPTransport() (*HTTPTransport, error) { + verifyServerCert, err := configutil.ParseBoolEnv(envVerifyServerCert, true) + if err != nil { + return nil, err + } + + serverTimeout, err := configutil.ParseDurationEnv(envServerTimeout, defaultServerTimeout) + if err != nil { + return nil, err + } + if serverTimeout < 0 { + serverTimeout = 0 + } + + serverURLs, err := initServerURLs() + if err != nil { + return nil, err + } + + tlsConfig := &tls.Config{InsecureSkipVerify: !verifyServerCert} + serverCertPath := os.Getenv(envServerCert) + if serverCertPath != "" { + serverCert, err := loadCertificate(serverCertPath) + if err != nil { + return nil, errors.Wrapf(err, "failed to load certificate from %s", serverCertPath) + } + // Disable standard verification, we'll check that the + // server supplies the exact certificate provided. + tlsConfig.InsecureSkipVerify = true + tlsConfig.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + return verifyPeerCertificate(rawCerts, serverCert) + } + } + + client := &http.Client{ + Timeout: serverTimeout, + Transport: &http.Transport{ + Proxy: defaultHTTPTransport.Proxy, + DialContext: defaultHTTPTransport.DialContext, + MaxIdleConns: defaultHTTPTransport.MaxIdleConns, + IdleConnTimeout: defaultHTTPTransport.IdleConnTimeout, + TLSHandshakeTimeout: defaultHTTPTransport.TLSHandshakeTimeout, + ExpectContinueTimeout: defaultHTTPTransport.ExpectContinueTimeout, + TLSClientConfig: tlsConfig, + }, + } + + commonHeaders := make(http.Header) + commonHeaders.Set("User-Agent", defaultUserAgent()) + + intakeHeaders := copyHeaders(commonHeaders) + intakeHeaders.Set("Content-Type", "application/x-ndjson") + intakeHeaders.Set("Content-Encoding", "deflate") + intakeHeaders.Set("Transfer-Encoding", "chunked") + + profileHeaders := copyHeaders(commonHeaders) + + t := &HTTPTransport{ + Client: client, + configHeaders: commonHeaders, + intakeHeaders: intakeHeaders, + profileHeaders: profileHeaders, + 
} + if apiKey := os.Getenv(envAPIKey); apiKey != "" { + t.SetAPIKey(apiKey) + } else if secretToken := os.Getenv(envSecretToken); secretToken != "" { + t.SetSecretToken(secretToken) + } + t.SetServerURL(serverURLs...) + return t, nil +} + +// SetServerURL sets the APM Server URL (or URLs) for sending requests. +// At least one URL must be specified, or the method will panic. The +// list will be randomly shuffled. +func (t *HTTPTransport) SetServerURL(u ...*url.URL) { + if len(u) == 0 { + panic("SetServerURL expects at least one URL") + } + intakeURLs := make([]*url.URL, len(u)) + configURLs := make([]*url.URL, len(u)) + profileURLs := make([]*url.URL, len(u)) + for i, u := range u { + intakeURLs[i] = urlWithPath(u, intakePath) + configURLs[i] = urlWithPath(u, configPath) + profileURLs[i] = urlWithPath(u, profilePath) + } + if n := len(intakeURLs); n > 0 { + if t.shuffleRand == nil { + t.shuffleRand = rand.New(rand.NewSource(time.Now().UnixNano())) + } + for i := n - 1; i > 0; i-- { + j := t.shuffleRand.Intn(i + 1) + intakeURLs[i], intakeURLs[j] = intakeURLs[j], intakeURLs[i] + configURLs[i], configURLs[j] = configURLs[j], configURLs[i] + profileURLs[i], profileURLs[j] = profileURLs[j], profileURLs[i] + } + } + t.intakeURLs = intakeURLs + t.configURLs = configURLs + t.profileURLs = profileURLs + t.urlIndex = 0 +} + +// SetUserAgent sets the User-Agent header that will be sent with each request. +func (t *HTTPTransport) SetUserAgent(ua string) { + t.setCommonHeader("User-Agent", ua) +} + +// SetSecretToken sets the Authorization header with the given secret token. +// +// This overrides the value specified via the ELASTIC_APM_SECRET_TOKEN or +// ELASTIC_APM_API_KEY environment variables, if either are set. 
+func (t *HTTPTransport) SetSecretToken(secretToken string) { + if secretToken != "" { + t.setCommonHeader("Authorization", "Bearer "+secretToken) + } else { + t.deleteCommonHeader("Authorization") + } +} + +// SetAPIKey sets the Authorization header with the given API Key. +// +// This overrides the value specified via the ELASTIC_APM_SECRET_TOKEN or +// ELASTIC_APM_API_KEY environment variables, if either are set. +func (t *HTTPTransport) SetAPIKey(apiKey string) { + if apiKey != "" { + t.setCommonHeader("Authorization", "ApiKey "+apiKey) + } else { + t.deleteCommonHeader("Authorization") + } +} + +func (t *HTTPTransport) setCommonHeader(key, value string) { + t.configHeaders.Set(key, value) + t.intakeHeaders.Set(key, value) + t.profileHeaders.Set(key, value) +} + +func (t *HTTPTransport) deleteCommonHeader(key string) { + t.configHeaders.Del(key) + t.intakeHeaders.Del(key) + t.profileHeaders.Del(key) +} + +// SendStream sends the stream over HTTP. If SendStream returns an error and +// the transport is configured with more than one APM Server URL, then the +// following request will be sent to the next URL in the list. 
+func (t *HTTPTransport) SendStream(ctx context.Context, r io.Reader) error { + urlIndex := atomic.LoadInt32(&t.urlIndex) + intakeURL := t.intakeURLs[urlIndex] + req := t.newRequest("POST", intakeURL) + req = requestWithContext(ctx, req) + req.Header = t.intakeHeaders + req.Body = ioutil.NopCloser(r) + if err := t.sendStreamRequest(req); err != nil { + atomic.StoreInt32(&t.urlIndex, (urlIndex+1)%int32(len(t.intakeURLs))) + return err + } + return nil +} + +func (t *HTTPTransport) sendStreamRequest(req *http.Request) error { + resp, err := t.Client.Do(req) + if err != nil { + return errors.Wrap(err, "sending event request failed") + } + switch resp.StatusCode { + case http.StatusOK, http.StatusAccepted: + resp.Body.Close() + return nil + } + defer resp.Body.Close() + + result := newHTTPError(resp) + if resp.StatusCode == http.StatusNotFound && result.Message == "404 page not found" { + // This may be an old (pre-6.5) APM server + // that does not support the v2 intake API. + result.Message = fmt.Sprintf("%s not found (requires APM Server 6.5.0 or newer)", req.URL) + } + return result +} + +// SendProfile sends a symbolised pprof profile, encoded as protobuf, and gzip-compressed. +// +// NOTE this is an experimental API, and may be removed in a future minor version, without +// being considered a breaking change. 
+func (t *HTTPTransport) SendProfile( + ctx context.Context, + metadataReader io.Reader, + profileReaders ...io.Reader, +) error { + urlIndex := atomic.LoadInt32(&t.urlIndex) + profileURL := t.profileURLs[urlIndex] + req := t.newRequest("POST", profileURL) + req = requestWithContext(ctx, req) + req.Header = t.profileHeaders + + writeBody := func(w *multipart.Writer) error { + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", fmt.Sprintf(`form-data; name="metadata"`)) + h.Set("Content-Type", "application/json") + part, err := w.CreatePart(h) + if err != nil { + return err + } + if _, err := io.Copy(part, metadataReader); err != nil { + return err + } + + for _, profileReader := range profileReaders { + h = make(textproto.MIMEHeader) + h.Set("Content-Disposition", fmt.Sprintf(`form-data; name="profile"`)) + h.Set("Content-Type", `application/x-protobuf; messageType="perftools.profiles.Profile"`) + part, err = w.CreatePart(h) + if err != nil { + return err + } + if _, err := io.Copy(part, profileReader); err != nil { + return err + } + } + return w.Close() + } + pipeR, pipeW := io.Pipe() + mpw := multipart.NewWriter(pipeW) + req.Header.Set("Content-Type", mpw.FormDataContentType()) + req.Body = pipeR + go func() { + err := writeBody(mpw) + pipeW.CloseWithError(err) + }() + return t.sendProfileRequest(req) +} + +func (t *HTTPTransport) sendProfileRequest(req *http.Request) error { + resp, err := t.Client.Do(req) + if err != nil { + return errors.Wrap(err, "sending profile request failed") + } + switch resp.StatusCode { + case http.StatusOK, http.StatusAccepted: + resp.Body.Close() + return nil + } + defer resp.Body.Close() + + result := newHTTPError(resp) + if resp.StatusCode == http.StatusNotFound && result.Message == "404 page not found" { + // TODO(axw) correct minimum server version. 
+ result.Message = fmt.Sprintf("%s not found (requires APM Server 7.5.0 or newer)", req.URL) + } + return result +} + +// WatchConfig polls the APM Server for agent config changes, sending +// them over the returned channel. +func (t *HTTPTransport) WatchConfig(ctx context.Context, args apmconfig.WatchParams) <-chan apmconfig.Change { + // We have an initial delay to allow application initialisation code + // to close apm.DefaultTracer, which would cancel watching config. + const initialDelay = 1 * time.Second + + changes := make(chan apmconfig.Change) + go func() { + defer close(changes) + + var etag string + var out chan apmconfig.Change + var change apmconfig.Change + timer := time.NewTimer(initialDelay) + for { + select { + case <-ctx.Done(): + return + case out <- change: + out = nil + change = apmconfig.Change{} + continue + case <-timer.C: + } + + urlIndex := atomic.LoadInt32(&t.urlIndex) + query := make(url.Values) + query.Set("service.name", args.Service.Name) + if args.Service.Environment != "" { + query.Set("service.environment", args.Service.Environment) + } + url := *t.configURLs[urlIndex] + url.RawQuery = query.Encode() + + req := t.newRequest("GET", &url) + req.Header = t.configHeaders + if etag != "" { + req.Header = copyHeaders(req.Header) + req.Header.Set("If-None-Match", strconv.QuoteToASCII(etag)) + } + + req = requestWithContext(ctx, req) + resp := t.configRequest(req) + var send bool + if resp.err != nil { + // The request will have failed if the context has been + // cancelled. No need to send a a change in this case. + send = ctx.Err() == nil + } + if !send && resp.attrs != nil { + etag = resp.etag + send = true + } + if send { + change = apmconfig.Change{Err: resp.err, Attrs: resp.attrs} + out = changes + } + timer.Reset(resp.maxAge) + } + }() + return changes +} + +func (t *HTTPTransport) configRequest(req *http.Request) configResponse { + // defaultMaxAge is the default amount of time to wait between + // requests. 
This should only be used when the server does not + // respond with a Cache-Control header, or where the header is + // malformed. + const defaultMaxAge = 5 * time.Minute + + resp, err := t.Client.Do(req) + if err != nil { + // TODO(axw) this might indicate that the APM Server is unavailable. + // In this case, we should allow a change in URL due to SendStream + // to cut the defaultMaxAge delay short. + return configResponse{ + err: errors.Wrap(err, "sending config request failed"), + maxAge: defaultMaxAge, + } + } + defer resp.Body.Close() + + var response configResponse + if etag, err := strconv.Unquote(resp.Header.Get("Etag")); err == nil { + response.etag = etag + } + cacheControl := parseCacheControl(resp.Header.Get("Cache-Control")) + response.maxAge = cacheControl.maxAge + if response.maxAge < 0 { + response.maxAge = defaultMaxAge + } + + switch resp.StatusCode { + case http.StatusNotModified, http.StatusForbidden, http.StatusNotFound: + // 304 (Not Modified) is returned when the config has not changed since the previous query. + // 403 (Forbidden) is returned if the server does not have the connection to Kibana enabled. + // 404 (Not Found) is returned by old servers that do not implement the config endpoint. + return response + case http.StatusOK: + attrs := make(map[string]string) + // TODO(axw) handling EOF shouldn't be necessary, server currently responds with an empty + // body when there is no config. 
+ if err := json.NewDecoder(resp.Body).Decode(&attrs); err != nil && err != io.EOF { + response.err = err + } else { + response.attrs = attrs + } + return response + } + response.err = newHTTPError(resp) + return response +} + +func (t *HTTPTransport) newRequest(method string, url *url.URL) *http.Request { + req := &http.Request{ + Method: method, + URL: url, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Host: url.Host, + } + return req +} + +func urlWithPath(url *url.URL, p string) *url.URL { + urlCopy := *url + urlCopy.Path = path.Clean(urlCopy.Path + p) + if urlCopy.RawPath != "" { + urlCopy.RawPath = path.Clean(urlCopy.RawPath + p) + } + return &urlCopy +} + +// HTTPError is an error returned by HTTPTransport methods when requests fail. +type HTTPError struct { + Response *http.Response + Message string +} + +func newHTTPError(resp *http.Response) *HTTPError { + bodyContents, err := ioutil.ReadAll(resp.Body) + if err == nil { + resp.Body = ioutil.NopCloser(bytes.NewReader(bodyContents)) + } + return &HTTPError{ + Response: resp, + Message: strings.TrimSpace(string(bodyContents)), + } +} + +func (e *HTTPError) Error() string { + msg := fmt.Sprintf("request failed with %s", e.Response.Status) + if e.Message != "" { + msg += ": " + e.Message + } + return msg +} + +// initServerURLs parses ELASTIC_APM_SERVER_URLS if specified, +// otherwise parses ELASTIC_APM_SERVER_URL if specified. If +// neither are specified, then the default localhost URL is +// returned. 
+func initServerURLs() ([]*url.URL, error) { + key := envServerURLs + value := os.Getenv(key) + if value == "" { + key = envServerURL + value = os.Getenv(key) + } + var urls []*url.URL + for _, field := range strings.Split(value, ",") { + field = strings.TrimSpace(field) + if field == "" { + continue + } + u, err := url.Parse(field) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse %s", key) + } + urls = append(urls, u) + } + if len(urls) == 0 { + urls = []*url.URL{defaultServerURL} + } + return urls, nil +} + +func requestWithContext(ctx context.Context, req *http.Request) *http.Request { + url := req.URL + req.URL = nil + reqCopy := req.WithContext(ctx) + reqCopy.URL = url + req.URL = url + return reqCopy +} + +func loadCertificate(path string) (*x509.Certificate, error) { + pemBytes, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + for { + var certBlock *pem.Block + certBlock, pemBytes = pem.Decode(pemBytes) + if certBlock == nil { + return nil, errors.New("missing or invalid certificate") + } + if certBlock.Type == "CERTIFICATE" { + return x509.ParseCertificate(certBlock.Bytes) + } + } +} + +func verifyPeerCertificate(rawCerts [][]byte, trusted *x509.Certificate) error { + if len(rawCerts) == 0 { + return errors.New("missing leaf certificate") + } + cert, err := x509.ParseCertificate(rawCerts[0]) + if err != nil { + return errors.Wrap(err, "failed to parse certificate from server") + } + if !cert.Equal(trusted) { + return errors.New("failed to verify server certificate") + } + return nil +} + +func defaultUserAgent() string { + return fmt.Sprintf("elasticapm-go/%s go/%s", apmversion.AgentVersion, runtime.Version()) +} + +func copyHeaders(in http.Header) http.Header { + out := make(http.Header, len(in)) + for k, vs := range in { + vsCopy := make([]string, len(vs)) + copy(vsCopy, vs) + out[k] = vsCopy + } + return out +} + +type configResponse struct { + err error + attrs map[string]string + etag string + maxAge 
time.Duration +} + +type cacheControl struct { + maxAge time.Duration +} + +func parseCacheControl(s string) cacheControl { + fields := strings.SplitN(s, "max-age=", 2) + if len(fields) < 2 { + return cacheControl{maxAge: -1} + } + s = fields[1] + if i := strings.IndexRune(s, ','); i != -1 { + s = s[:i] + } + maxAge, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return cacheControl{maxAge: -1} + } + return cacheControl{maxAge: time.Duration(maxAge) * time.Second} +} diff --git a/vendor/go.elastic.co/apm/transport/transporttest/doc.go b/vendor/go.elastic.co/apm/transport/transporttest/doc.go new file mode 100644 index 00000000000..13f9e3adf1f --- /dev/null +++ b/vendor/go.elastic.co/apm/transport/transporttest/doc.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package transporttest provides implementations of +// transport.Transport for testing purposes. +package transporttest diff --git a/vendor/go.elastic.co/apm/transport/transporttest/err.go b/vendor/go.elastic.co/apm/transport/transporttest/err.go new file mode 100644 index 00000000000..668fff8a310 --- /dev/null +++ b/vendor/go.elastic.co/apm/transport/transporttest/err.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package transporttest + +import ( + "context" + "io" + "io/ioutil" + + "go.elastic.co/apm/transport" +) + +// Discard is a transport.Transport which discards +// all streams, and returns no errors. +var Discard transport.Transport = ErrorTransport{} + +// ErrorTransport is a transport that returns the stored error +// for each method call. +type ErrorTransport struct { + Error error +} + +// SendStream discards the stream and returns t.Error. +func (t ErrorTransport) SendStream(ctx context.Context, r io.Reader) error { + errc := make(chan error, 1) + go func() { + _, err := io.Copy(ioutil.Discard, r) + errc <- err + }() + select { + case err := <-errc: + if err != nil { + return err + } + return t.Error + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/vendor/go.elastic.co/apm/transport/transporttest/recorder.go b/vendor/go.elastic.co/apm/transport/transporttest/recorder.go new file mode 100644 index 00000000000..4db125ab8b4 --- /dev/null +++ b/vendor/go.elastic.co/apm/transport/transporttest/recorder.go @@ -0,0 +1,203 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package transporttest + +import ( + "compress/zlib" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "sync" + + "github.com/google/go-cmp/cmp" + + "go.elastic.co/apm" + "go.elastic.co/apm/model" +) + +// NewRecorderTracer returns a new apm.Tracer and +// RecorderTransport, which is set as the tracer's transport. +// +// DEPRECATED. Use apmtest.NewRecordingTracer instead. +func NewRecorderTracer() (*apm.Tracer, *RecorderTransport) { + var transport RecorderTransport + tracer, err := apm.NewTracerOptions(apm.TracerOptions{ + ServiceName: "transporttest", + Transport: &transport, + }) + if err != nil { + panic(err) + } + return tracer, &transport +} + +// RecorderTransport implements transport.Transport, recording the +// streams sent. The streams can be retrieved using the Payloads +// method. +type RecorderTransport struct { + mu sync.Mutex + metadata *metadata + payloads Payloads +} + +// ResetPayloads clears out any recorded payloads. +func (r *RecorderTransport) ResetPayloads() { + r.mu.Lock() + defer r.mu.Unlock() + r.payloads = Payloads{} +} + +// SendStream records the stream such that it can later be obtained via Payloads. 
+func (r *RecorderTransport) SendStream(ctx context.Context, stream io.Reader) error { + return r.record(ctx, stream) +} + +// SendProfile records the stream such that it can later be obtained via Payloads. +func (r *RecorderTransport) SendProfile(ctx context.Context, metadata io.Reader, profiles ...io.Reader) error { + return r.recordProto(ctx, metadata, profiles) +} + +// Metadata returns the metadata recorded by the transport. If metadata is yet to +// be received, this method will panic. +func (r *RecorderTransport) Metadata() (_ model.System, _ model.Process, _ model.Service, labels model.StringMap) { + r.mu.Lock() + defer r.mu.Unlock() + return r.metadata.System, r.metadata.Process, r.metadata.Service, r.metadata.Labels +} + +// Payloads returns the payloads recorded by SendStream. +func (r *RecorderTransport) Payloads() Payloads { + r.mu.Lock() + defer r.mu.Unlock() + return r.payloads +} + +func (r *RecorderTransport) record(ctx context.Context, stream io.Reader) error { + reader, err := zlib.NewReader(stream) + if err != nil { + if err == io.ErrUnexpectedEOF { + if contextDone(ctx) { + return ctx.Err() + } + // truly unexpected + } + panic(err) + } + decoder := json.NewDecoder(reader) + + // The first object of any request must be a metadata struct. 
+ var metadataPayload struct { + Metadata metadata `json:"metadata"` + } + if err := decoder.Decode(&metadataPayload); err != nil { + panic(err) + } + r.recordMetadata(&metadataPayload.Metadata) + + for { + var payload struct { + Error *model.Error `json:"error"` + Metrics *model.Metrics `json:"metricset"` + Span *model.Span `json:"span"` + Transaction *model.Transaction `json:"transaction"` + } + err := decoder.Decode(&payload) + if err == io.EOF || (err == io.ErrUnexpectedEOF && contextDone(ctx)) { + break + } else if err != nil { + panic(err) + } + r.mu.Lock() + switch { + case payload.Error != nil: + r.payloads.Errors = append(r.payloads.Errors, *payload.Error) + case payload.Metrics != nil: + r.payloads.Metrics = append(r.payloads.Metrics, *payload.Metrics) + case payload.Span != nil: + r.payloads.Spans = append(r.payloads.Spans, *payload.Span) + case payload.Transaction != nil: + r.payloads.Transactions = append(r.payloads.Transactions, *payload.Transaction) + } + r.mu.Unlock() + } + return nil +} + +func (r *RecorderTransport) recordProto(ctx context.Context, metadataReader io.Reader, profileReaders []io.Reader) error { + var metadata metadata + if err := json.NewDecoder(metadataReader).Decode(&metadata); err != nil { + panic(err) + } + r.recordMetadata(&metadata) + + r.mu.Lock() + defer r.mu.Unlock() + for _, profileReader := range profileReaders { + data, err := ioutil.ReadAll(profileReader) + if err != nil { + panic(err) + } + r.payloads.Profiles = append(r.payloads.Profiles, data) + } + return nil +} + +func (r *RecorderTransport) recordMetadata(m *metadata) { + r.mu.Lock() + defer r.mu.Unlock() + if r.metadata == nil { + r.metadata = m + } else { + // Make sure the metadata doesn't change between requests. 
+ if diff := cmp.Diff(r.metadata, m); diff != "" { + panic(fmt.Errorf("metadata changed\n%s", diff)) + } + } +} + +func contextDone(ctx context.Context) bool { + select { + case <-ctx.Done(): + return true + default: + return false + } +} + +// Payloads holds the recorded payloads. +type Payloads struct { + Errors []model.Error + Metrics []model.Metrics + Spans []model.Span + Transactions []model.Transaction + Profiles [][]byte +} + +// Len returns the number of recorded payloads. +func (p *Payloads) Len() int { + return len(p.Transactions) + len(p.Errors) + len(p.Metrics) +} + +type metadata struct { + System model.System `json:"system"` + Process model.Process `json:"process"` + Service model.Service `json:"service"` + Labels model.StringMap `json:"labels,omitempty"` +} diff --git a/vendor/go.elastic.co/apm/utils.go b/vendor/go.elastic.co/apm/utils.go new file mode 100644 index 00000000000..ae24404e315 --- /dev/null +++ b/vendor/go.elastic.co/apm/utils.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package apm + +import ( + "fmt" + "math/rand" + "os" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strings" + "time" + + "github.com/pkg/errors" + + "go.elastic.co/apm/internal/apmhostutil" + "go.elastic.co/apm/internal/apmstrings" + "go.elastic.co/apm/model" +) + +var ( + currentProcess model.Process + goAgent = model.Agent{Name: "go", Version: AgentVersion} + goLanguage = model.Language{Name: "go", Version: runtime.Version()} + goRuntime = model.Runtime{Name: runtime.Compiler, Version: runtime.Version()} + localSystem model.System + + serviceNameInvalidRegexp = regexp.MustCompile("[^" + serviceNameValidClass + "]") + labelKeyReplacer = strings.NewReplacer(`.`, `_`, `*`, `_`, `"`, `_`) + + rtypeBool = reflect.TypeOf(false) + rtypeFloat64 = reflect.TypeOf(float64(0)) +) + +const ( + envHostname = "ELASTIC_APM_HOSTNAME" + envServiceNodeName = "ELASTIC_APM_SERVICE_NODE_NAME" + + serviceNameValidClass = "a-zA-Z0-9 _-" + + // At the time of writing, all keyword length limits + // are 1024 runes, enforced by JSON Schema. + stringLengthLimit = 1024 + + // Non-keyword string fields are not limited in length + // by JSON Schema, but we still truncate all strings. + // Some strings, such as database statement, we explicitly + // allow to be longer than others. 
+ longStringLengthLimit = 10000 +) + +func init() { + currentProcess = getCurrentProcess() + localSystem = getLocalSystem() +} + +func getCurrentProcess() model.Process { + ppid := os.Getppid() + title, err := currentProcessTitle() + if err != nil || title == "" { + title = filepath.Base(os.Args[0]) + } + return model.Process{ + Pid: os.Getpid(), + Ppid: &ppid, + Title: truncateString(title), + Argv: os.Args, + } +} + +func makeService(name, version, environment string) model.Service { + service := model.Service{ + Name: truncateString(name), + Version: truncateString(version), + Environment: truncateString(environment), + Agent: &goAgent, + Language: &goLanguage, + Runtime: &goRuntime, + } + + serviceNodeName := os.Getenv(envServiceNodeName) + if serviceNodeName != "" { + service.Node = &model.ServiceNode{ConfiguredName: truncateString(serviceNodeName)} + } + + return service +} + +func getLocalSystem() model.System { + system := model.System{ + Architecture: runtime.GOARCH, + Platform: runtime.GOOS, + } + system.Hostname = os.Getenv(envHostname) + if system.Hostname == "" { + if hostname, err := os.Hostname(); err == nil { + system.Hostname = hostname + } + } + system.Hostname = truncateString(system.Hostname) + if container, err := apmhostutil.Container(); err == nil { + system.Container = container + } + system.Kubernetes = getKubernetesMetadata() + return system +} + +func getKubernetesMetadata() *model.Kubernetes { + kubernetes, err := apmhostutil.Kubernetes() + if err != nil { + kubernetes = nil + } + namespace := os.Getenv("KUBERNETES_NAMESPACE") + podName := os.Getenv("KUBERNETES_POD_NAME") + podUID := os.Getenv("KUBERNETES_POD_UID") + nodeName := os.Getenv("KUBERNETES_NODE_NAME") + if namespace == "" && podName == "" && podUID == "" && nodeName == "" { + return kubernetes + } + if kubernetes == nil { + kubernetes = &model.Kubernetes{} + } + if namespace != "" { + kubernetes.Namespace = namespace + } + if nodeName != "" { + if kubernetes.Node == nil { + 
kubernetes.Node = &model.KubernetesNode{} + } + kubernetes.Node.Name = nodeName + } + if podName != "" || podUID != "" { + if kubernetes.Pod == nil { + kubernetes.Pod = &model.KubernetesPod{} + } + if podName != "" { + kubernetes.Pod.Name = podName + } + if podUID != "" { + kubernetes.Pod.UID = podUID + } + } + return kubernetes +} + +func cleanLabelKey(k string) string { + return labelKeyReplacer.Replace(k) +} + +// makeLabelValue returns v as a value suitable for including +// in a label value. If v is numerical or boolean, then it will +// be returned as-is; otherwise the value will be returned as a +// string, using fmt.Sprint if necessary, and possibly truncated +// using truncateString. +func makeLabelValue(v interface{}) interface{} { + switch v.(type) { + case nil, bool, float32, float64, + uint, uint8, uint16, uint32, uint64, + int, int8, int16, int32, int64: + return v + case string: + return truncateString(v.(string)) + } + // Slow path. If v has a non-basic type whose underlying + // type is convertible to bool or float64, return v as-is. + // Otherwise, stringify. 
+ rtype := reflect.TypeOf(v) + if rtype.ConvertibleTo(rtypeBool) || rtype.ConvertibleTo(rtypeFloat64) { + // Custom type + return v + } + return truncateString(fmt.Sprint(v)) +} + +func validateServiceName(name string) error { + idx := serviceNameInvalidRegexp.FindStringIndex(name) + if idx == nil { + return nil + } + return errors.Errorf( + "invalid service name %q: character %q is not in the allowed set (%s)", + name, name[idx[0]], serviceNameValidClass, + ) +} + +func sanitizeServiceName(name string) string { + return serviceNameInvalidRegexp.ReplaceAllString(name, "_") +} + +func truncateString(s string) string { + s, _ = apmstrings.Truncate(s, stringLengthLimit) + return s +} + +func truncateLongString(s string) string { + s, _ = apmstrings.Truncate(s, longStringLengthLimit) + return s +} + +func nextGracePeriod(p time.Duration) time.Duration { + if p == -1 { + return 0 + } + for i := time.Duration(0); i < 6; i++ { + if p == (i * i * time.Second) { + return (i + 1) * (i + 1) * time.Second + } + } + return p +} + +// jitterDuration returns d +/- some multiple of d in the range [0,j]. +func jitterDuration(d time.Duration, rng *rand.Rand, j float64) time.Duration { + if d == 0 || j == 0 { + return d + } + r := (rng.Float64() * j * 2) - j + return d + time.Duration(float64(d)*r) +} + +func durationMicros(d time.Duration) float64 { + us := d / time.Microsecond + ns := d % time.Microsecond + return float64(us) + float64(ns)/1e9 +} diff --git a/vendor/go.elastic.co/apm/utils_linux.go b/vendor/go.elastic.co/apm/utils_linux.go new file mode 100644 index 00000000000..abf97366ab2 --- /dev/null +++ b/vendor/go.elastic.co/apm/utils_linux.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +import ( + "bytes" + "syscall" + "unsafe" +) + +func currentProcessTitle() (string, error) { + // PR_GET_NAME (since Linux 2.6.11) + // Return the name of the calling thread, in the buffer pointed to by + // (char *) arg2. The buffer should allow space for up to 16 bytes; + // the returned string will be null-terminated. + var buf [16]byte + if _, _, errno := syscall.RawSyscall6( + syscall.SYS_PRCTL, syscall.PR_GET_NAME, + uintptr(unsafe.Pointer(&buf[0])), + 0, 0, 0, 0, + ); errno != 0 { + return "", errno + } + return string(buf[:bytes.IndexByte(buf[:], 0)]), nil +} diff --git a/vendor/go.elastic.co/apm/utils_other.go b/vendor/go.elastic.co/apm/utils_other.go new file mode 100644 index 00000000000..06a38c5d9e4 --- /dev/null +++ b/vendor/go.elastic.co/apm/utils_other.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//+build !linux + +package apm + +import ( + "github.com/pkg/errors" + + sysinfo "github.com/elastic/go-sysinfo" +) + +func currentProcessTitle() (string, error) { + proc, err := sysinfo.Self() + if err != nil { + return "", errors.Wrap(err, "failed to get process info") + } + info, err := proc.Info() + if err != nil { + return "", errors.Wrap(err, "failed to get process info") + } + return info.Name, nil +} diff --git a/vendor/go.elastic.co/apm/version.go b/vendor/go.elastic.co/apm/version.go new file mode 100644 index 00000000000..4992c913715 --- /dev/null +++ b/vendor/go.elastic.co/apm/version.go @@ -0,0 +1,23 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package apm + +const ( + // AgentVersion is the Elastic APM Go Agent version. 
+ AgentVersion = "1.7.2" +) diff --git a/vendor/go.elastic.co/fastjson/.travis.yml b/vendor/go.elastic.co/fastjson/.travis.yml new file mode 100644 index 00000000000..9949d26e09c --- /dev/null +++ b/vendor/go.elastic.co/fastjson/.travis.yml @@ -0,0 +1,9 @@ +language: go +go_import_path: go.elastic.co/fastjson + +go: + - stable + - "1.8.x" + +script: + - go test -v ./... diff --git a/vendor/go.elastic.co/fastjson/LICENSE b/vendor/go.elastic.co/fastjson/LICENSE new file mode 100644 index 00000000000..f44e6a0fa12 --- /dev/null +++ b/vendor/go.elastic.co/fastjson/LICENSE @@ -0,0 +1,23 @@ +Copyright 2018 Elasticsearch BV + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +--- + +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/go.elastic.co/fastjson/README.md b/vendor/go.elastic.co/fastjson/README.md new file mode 100644 index 00000000000..59ccccfaada --- /dev/null +++ b/vendor/go.elastic.co/fastjson/README.md @@ -0,0 +1,134 @@ +[![Travis-CI](https://travis-ci.org/elastic/go-fastjson.svg)](https://travis-ci.org/elastic/go-fastjson) + +# fastjson: fast JSON encoder for Go + +Package fastjson provides a library and code generator for fast JSON encoding. + +The supplied code generator (cmd/generate-fastjson) generates JSON marshalling +methods for all exported types within a specified package. + +## Requirements + +Go 1.8+ + +## License + +Apache 2.0. + +## Installation + +```bash +go get -u go.elastic.co/fastjson/... +``` + +## Code generation + +Package fastjson is intended to be used with the accompanying code generator, +cmd/generate-fastjson. This code generator will parse the Go code of a +specified package, and write out fastjson marshalling method (MarshalFastJSON) +definitions for the exported types in the package. + +You can provide your own custom marshalling logic for a type by defining a +MarshalFastJSON method for it. The generator will not generate methods for +those types with existing marshalling methods. + +### Usage + +``` +generate-fastjson + -f remove the output file if it exists + -o string + file to which output will be written (default "-") +``` + +### Custom omitempty extension + +The standard `json` struct tags defined by `encoding/json` are honoured, +enabling you to generate fastjson-marshalling code for your existing code. 
+ +We extend the `omitempty` option by enabling you to define an unexported +method on your type `T`, `func (T) isZero() bool`, which will be called +to determine whether or not the value is considered empty. This enables +`omitempty` on non-pointer struct types. + +### Example + +Given the following package: + +```go +package example + +type Foo struct { + Bar Bar `json:",omitempty"` +} + +type Bar struct { + Baz Baz + Qux *Qux `json:"quux,omitempty"` +} + +func (b Bar) isZero() bool { + return b == (Bar{}) +} + +type Baz struct { +} + +func (Baz) MarshalFastJSON(w *fastjson.Writer) error { +} + +type Qux struct{} +``` + +Assuming we're in the package directory, we would generate the methods like so, +which will write a Go file to stdout: + +```bash +generate-fastjson . +``` + +Output: +```go +// Code generated by "generate-fastjson". DO NOT EDIT. + +package example + +import ( + "go.elastic.co/fastjson" +) + +func (v *Foo) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + if !v.Bar.isZero() { + w.RawString("\"Bar\":") + if err := v.Bar.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr == err + } + } + w.RawByte('}') + return nil +} + +func (v *Bar) MarshalFastJSON(w *fastjson.Writer) error { + var firstErr error + w.RawByte('{') + w.RawString("\"Baz\":") + if err := v.Baz.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr = err + } + if v.Qux != nil { + w.RawString(",\"quux\":") + if err := v.Qux.MarshalFastJSON(w); err != nil && firstErr == nil { + firstErr == err + } + } + w.RawByte('}') + return firstErr +} + +func (v *Qux) MarshalFastJSON(w *fastjson.Writer) error { + w.RawByte('{') + w.RawByte('}') + return nil +} +``` diff --git a/vendor/go.elastic.co/fastjson/doc.go b/vendor/go.elastic.co/fastjson/doc.go new file mode 100644 index 00000000000..4a3576be31e --- /dev/null +++ b/vendor/go.elastic.co/fastjson/doc.go @@ -0,0 +1,23 @@ +// Copyright 2018 Elasticsearch BV +// +// Licensed under the Apache License, Version 2.0 
(the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fastjson provides a library for fast JSON encoding, +// optimised for static code generation. +// +// Fastjson functions and interfaces are structured such that +// all encoding appends to a buffer, enabling buffer reuse +// without forcing specific mechanisms such as sync.Pool. This +// enables zero-allocation encoding without incurring any +// concurrency overhead in certain applications. +package fastjson // import "go.elastic.co/fastjson" diff --git a/vendor/go.elastic.co/fastjson/go.mod b/vendor/go.elastic.co/fastjson/go.mod new file mode 100644 index 00000000000..0b130f35c1c --- /dev/null +++ b/vendor/go.elastic.co/fastjson/go.mod @@ -0,0 +1,3 @@ +module go.elastic.co/fastjson + +require github.com/pkg/errors v0.8.0 diff --git a/vendor/go.elastic.co/fastjson/go.sum b/vendor/go.elastic.co/fastjson/go.sum new file mode 100644 index 00000000000..3dfe462f062 --- /dev/null +++ b/vendor/go.elastic.co/fastjson/go.sum @@ -0,0 +1,2 @@ +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/vendor/go.elastic.co/fastjson/marshaler.go b/vendor/go.elastic.co/fastjson/marshaler.go new file mode 100644 index 00000000000..7359e6dfde1 --- /dev/null +++ b/vendor/go.elastic.co/fastjson/marshaler.go @@ -0,0 +1,151 @@ +// Copyright 2018 Elasticsearch BV +// +// Licensed under the Apache License, Version 2.0 (the "License"); 
+// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fastjson + +import ( + "encoding/json" + "fmt" + + "github.com/pkg/errors" +) + +// Marshaler defines an interface that types can implement to provide +// fast JSON marshaling. +type Marshaler interface { + // MarshalFastJSON writes a JSON representation of the type to w. + // + // MarshalFastJSON is expected to suppress any panics. Depending + // on the application, it may be expected that MarshalFastJSON + // writes valid JSON to w, even in error cases. + // + // The returned error will be propagated up through to callers of + // fastjson.Marshal. + MarshalFastJSON(w *Writer) error +} + +// Appender defines an interface that types can implement to append +// their JSON representation to a buffer. +type Appender interface { + // AppendJSON appends the JSON representation of the value to the + // buffer, and returns the extended buffer. + // + // AppendJSON is required not to panic or fail. + AppendJSON([]byte) []byte +} + +// Marshal marshals v as JSON to w. +// +// For all basic types, Marshal uses w's methods to marshal the values +// directly. If v implements Marshaler, its MarshalFastJSON method will +// be used; if v implements Appender, its AppendJSON method will be used, +// and it is assumed to append valid JSON. As a final resort, we use +// json.Marshal. 
+// +// Where json.Marshal is used internally (see above), errors or panics +// produced by json.Marshal will be encoded as JSON objects, with special keys +// "__ERROR__" for errors, and "__PANIC__" for panics. e.g. if json.Marshal +// panics due to a broken json.Marshaler implementation or assumption, then +// Marshal will encode the panic as +// +// {"__PANIC__": "panic calling MarshalJSON for type Foo: reason"} +// +// Marshal returns the first error encountered. +func Marshal(w *Writer, v interface{}) error { + switch v := v.(type) { + case nil: + w.RawString("null") + case string: + w.String(v) + case uint: + w.Uint64(uint64(v)) + case uint8: + w.Uint64(uint64(v)) + case uint16: + w.Uint64(uint64(v)) + case uint32: + w.Uint64(uint64(v)) + case uint64: + w.Uint64(v) + case int: + w.Int64(int64(v)) + case int8: + w.Int64(int64(v)) + case int16: + w.Int64(int64(v)) + case int32: + w.Int64(int64(v)) + case int64: + w.Int64(v) + case float32: + w.Float32(v) + case float64: + w.Float64(v) + case bool: + w.Bool(v) + case map[string]interface{}: + if v == nil { + w.RawString("null") + return nil + } + w.RawByte('{') + var firstErr error + first := true + for k, v := range v { + if first { + first = false + } else { + w.RawByte(',') + } + w.String(k) + w.RawByte(':') + if err := Marshal(w, v); err != nil && firstErr == nil { + firstErr = err + } + } + w.RawByte('}') + return firstErr + case Marshaler: + return v.MarshalFastJSON(w) + case Appender: + w.buf = v.AppendJSON(w.buf) + default: + return marshalReflect(w, v) + } + return nil +} + +func marshalReflect(w *Writer, v interface{}) (result error) { + defer func() { + if r := recover(); r != nil { + err, ok := r.(error) + if !ok { + err = fmt.Errorf("%s", r) + } + result = errors.Wrapf(err, "panic calling MarshalJSON for type %T", v) + w.RawString(`{"__PANIC__":`) + w.String(fmt.Sprint(result)) + w.RawByte('}') + } + }() + raw, err := json.Marshal(v) + if err != nil { + w.RawString(`{"__ERROR__":`) + 
w.String(fmt.Sprint(err)) + w.RawByte('}') + return err + } + w.RawBytes(raw) + return nil +} diff --git a/vendor/go.elastic.co/fastjson/writer.go b/vendor/go.elastic.co/fastjson/writer.go new file mode 100644 index 00000000000..e66731d9f80 --- /dev/null +++ b/vendor/go.elastic.co/fastjson/writer.go @@ -0,0 +1,181 @@ +// Copyright 2018 Elasticsearch BV +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fastjson + +import ( + "strconv" + "time" + "unicode/utf8" +) + +// Writer is a JSON writer, appending to an internal buffer. +// +// Writer is not safe for concurrent use. A Writer can be +// reset and reused, which will reuse the underlying buffer. +type Writer struct { + buf []byte +} + +// Bytes returns the internal buffer. The result is invalidated when Reset is called. +func (w *Writer) Bytes() []byte { + return w.buf +} + +// Size returns the current size of the buffer. Size is typically used in conjunction +// with Rewind, to mark a position to which the writer may later be rewound. +func (w *Writer) Size() int { + return len(w.buf) +} + +// Rewind rewinds the buffer such that it has size bytes, dropping everything proceeding. +func (w *Writer) Rewind(size int) { + w.buf = w.buf[:size] +} + +// Reset resets the internal []byte buffer to empty. +func (w *Writer) Reset() { + w.buf = w.buf[:0] +} + +// RawByte appends c to the buffer. 
+func (w *Writer) RawByte(c byte) { + w.buf = append(w.buf, c) +} + +// RawBytes appends data, unmodified, to the buffer. +func (w *Writer) RawBytes(data []byte) { + w.buf = append(w.buf, data...) +} + +// RawString appends s to the buffer. +func (w *Writer) RawString(s string) { + w.buf = append(w.buf, s...) +} + +// Uint64 appends n to the buffer. +func (w *Writer) Uint64(n uint64) { + w.buf = strconv.AppendUint(w.buf, uint64(n), 10) +} + +// Int64 appends n to the buffer. +func (w *Writer) Int64(n int64) { + w.buf = strconv.AppendInt(w.buf, int64(n), 10) +} + +// Float32 appends n to the buffer. +func (w *Writer) Float32(n float32) { + w.buf = strconv.AppendFloat(w.buf, float64(n), 'g', -1, 32) +} + +// Float64 appends n to the buffer. +func (w *Writer) Float64(n float64) { + w.buf = strconv.AppendFloat(w.buf, float64(n), 'g', -1, 64) +} + +// Bool appends v to the buffer. +func (w *Writer) Bool(v bool) { + w.buf = strconv.AppendBool(w.buf, v) +} + +// Time appends t to the buffer, formatted according to layout. +// +// The encoded time is not surrounded by quotes; it is the +// responsibility of the caller to ensure the formatted time is +// quoted as necessary. +func (w *Writer) Time(t time.Time, layout string) { + w.buf = t.AppendFormat(w.buf, layout) +} + +// String appends s, quoted and escaped, to the buffer. +func (w *Writer) String(s string) { + w.RawByte('"') + w.StringContents(s) + w.RawByte('"') +} + +// Note: code below taken from mailru/easyjson, adapted to use Writer. + +const chars = "0123456789abcdef" + +func isNotEscapedSingleChar(c byte, escapeHTML bool) bool { + // Note: might make sense to use a table if there are more chars to escape. With 4 chars + // it benchmarks the same. + if escapeHTML { + return c != '<' && c != '>' && c != '&' && c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf + } + return c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf +} + +// StringContents is the same as String, but without the surrounding quotes. 
+func (w *Writer) StringContents(s string) { + // Portions of the string that contain no escapes are appended as byte slices. + + p := 0 // last non-escape symbol + + for i := 0; i < len(s); { + c := s[i] + + if isNotEscapedSingleChar(c, true) { + // single-width character, no escaping is required + i++ + continue + } else if c < utf8.RuneSelf { + // single-with character, need to escape + w.RawString(s[p:i]) + switch c { + case '\t': + w.RawString(`\t`) + case '\r': + w.RawString(`\r`) + case '\n': + w.RawString(`\n`) + case '\\': + w.RawString(`\\`) + case '"': + w.RawString(`\"`) + default: + w.RawString(`\u00`) + w.RawByte(chars[c>>4]) + w.RawByte(chars[c&0xf]) + } + + i++ + p = i + continue + } + + // broken utf + runeValue, runeWidth := utf8.DecodeRuneInString(s[i:]) + if runeValue == utf8.RuneError && runeWidth == 1 { + w.RawString(s[p:i]) + w.RawString(`\ufffd`) + i++ + p = i + continue + } + + // jsonp stuff - tab separator and line separator + if runeValue == '\u2028' || runeValue == '\u2029' { + w.RawString(s[p:i]) + w.RawString(`\u202`) + w.RawByte(chars[runeValue&0xf]) + i += runeWidth + p = i + continue + } + i += runeWidth + } + w.RawString(s[p:]) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index ba922d80e82..20ac6753395 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -118,6 +118,8 @@ github.com/akavel/rsrc/ico github.com/andrewkroh/sys/windows/svc/eventlog # github.com/antlr/antlr4 v0.0.0-20200326173327-a4c66dc863bb github.com/antlr/antlr4/runtime/Go/antlr +# github.com/armon/go-radix v1.0.0 +github.com/armon/go-radix # github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 github.com/armon/go-socks5 # github.com/aws/aws-lambda-go v1.6.0 @@ -736,6 +738,12 @@ github.com/samuel/go-thrift/parser github.com/sanathkr/go-yaml # github.com/sanathkr/yaml v1.0.1-0.20170819201035-0056894fa522 github.com/sanathkr/yaml +# 
github.com/santhosh-tekuri/jsonschema v1.2.4 +github.com/santhosh-tekuri/jsonschema +github.com/santhosh-tekuri/jsonschema/decoders +github.com/santhosh-tekuri/jsonschema/formats +github.com/santhosh-tekuri/jsonschema/loader +github.com/santhosh-tekuri/jsonschema/mediatypes # github.com/shirou/gopsutil v2.19.11+incompatible github.com/shirou/gopsutil/disk github.com/shirou/gopsutil/internal/common @@ -752,6 +760,7 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require +github.com/stretchr/testify/suite # github.com/tsg/go-daemon v0.0.0-20200207173439-e704b93fd89b github.com/tsg/go-daemon # github.com/tsg/gopacket v0.0.0-20190320122513-dd3d0e41124a @@ -807,6 +816,32 @@ github.com/yuin/gopher-lua github.com/yuin/gopher-lua/ast github.com/yuin/gopher-lua/parse github.com/yuin/gopher-lua/pm +# go.elastic.co/apm v1.7.2 +go.elastic.co/apm +go.elastic.co/apm/apmconfig +go.elastic.co/apm/apmtest +go.elastic.co/apm/internal/apmcontext +go.elastic.co/apm/internal/apmhostutil +go.elastic.co/apm/internal/apmhttputil +go.elastic.co/apm/internal/apmlog +go.elastic.co/apm/internal/apmschema +go.elastic.co/apm/internal/apmstrings +go.elastic.co/apm/internal/apmversion +go.elastic.co/apm/internal/configutil +go.elastic.co/apm/internal/iochan +go.elastic.co/apm/internal/pkgerrorsutil +go.elastic.co/apm/internal/ringbuffer +go.elastic.co/apm/internal/wildcard +go.elastic.co/apm/model +go.elastic.co/apm/stacktrace +go.elastic.co/apm/transport +go.elastic.co/apm/transport/transporttest +# go.elastic.co/apm/module/apmelasticsearch v1.7.2 +go.elastic.co/apm/module/apmelasticsearch +# go.elastic.co/apm/module/apmhttp v1.7.2 +go.elastic.co/apm/module/apmhttp +# go.elastic.co/fastjson v1.0.0 +go.elastic.co/fastjson # go.opencensus.io v0.22.2 
go.opencensus.io go.opencensus.io/internal